Diffstat (limited to 'include')
-rw-r--r--include/asm-generic/codetag.lds.h16
-rw-r--r--include/asm-generic/hugetlb.h17
-rw-r--r--include/asm-generic/msi.h1
-rw-r--r--include/asm-generic/vmlinux.lds.h2
-rw-r--r--include/crypto/engine.h1
-rw-r--r--include/crypto/internal/acompress.h5
-rw-r--r--include/crypto/internal/engine.h15
-rw-r--r--include/crypto/internal/hash.h36
-rw-r--r--include/drm/amd/isp.h51
-rw-r--r--include/drm/display/drm_dp.h24
-rw-r--r--include/drm/display/drm_dp_helper.h17
-rw-r--r--include/drm/display/drm_hdmi_audio_helper.h1
-rw-r--r--include/drm/display/drm_hdmi_cec_helper.h72
-rw-r--r--include/drm/drm_accel.h5
-rw-r--r--include/drm/drm_bridge.h375
-rw-r--r--include/drm/drm_color_mgmt.h27
-rw-r--r--include/drm/drm_connector.h60
-rw-r--r--include/drm/drm_debugfs.h11
-rw-r--r--include/drm/drm_device.h11
-rw-r--r--include/drm/drm_drv.h22
-rw-r--r--include/drm/drm_edid.h8
-rw-r--r--include/drm/drm_file.h7
-rw-r--r--include/drm/drm_format_helper.h19
-rw-r--r--include/drm/drm_fourcc.h3
-rw-r--r--include/drm/drm_gem.h13
-rw-r--r--include/drm/drm_gem_framebuffer_helper.h6
-rw-r--r--include/drm/drm_gem_shmem_helper.h11
-rw-r--r--include/drm/drm_gem_vram_helper.h2
-rw-r--r--include/drm/drm_gpusvm.h101
-rw-r--r--include/drm/drm_gpuvm.h8
-rw-r--r--include/drm/drm_managed.h15
-rw-r--r--include/drm/drm_mipi_dsi.h2
-rw-r--r--include/drm/drm_mode_config.h3
-rw-r--r--include/drm/drm_modeset_helper.h2
-rw-r--r--include/drm/drm_pagemap.h135
-rw-r--r--include/drm/drm_panic.h6
-rw-r--r--include/drm/drm_prime.h3
-rw-r--r--include/drm/gpu_scheduler.h40
-rw-r--r--include/drm/intel/pciids.h12
-rw-r--r--include/drm/ttm/ttm_bo.h73
-rw-r--r--include/drm/ttm/ttm_device.h1
-rw-r--r--include/dt-bindings/clock/ast2600-clock.h2
-rw-r--r--include/dt-bindings/clock/nxp,imx94-clock.h13
-rw-r--r--include/dt-bindings/clock/qcom,ipq5018-cmn-pll.h16
-rw-r--r--include/dt-bindings/clock/qcom,ipq5424-cmn-pll.h22
-rw-r--r--include/dt-bindings/clock/qcom,milos-camcc.h131
-rw-r--r--include/dt-bindings/clock/qcom,milos-dispcc.h61
-rw-r--r--include/dt-bindings/clock/qcom,milos-gcc.h210
-rw-r--r--include/dt-bindings/clock/qcom,milos-gpucc.h56
-rw-r--r--include/dt-bindings/clock/qcom,milos-videocc.h36
-rw-r--r--include/dt-bindings/clock/qcom,qcs615-camcc.h110
-rw-r--r--include/dt-bindings/clock/qcom,qcs615-dispcc.h52
-rw-r--r--include/dt-bindings/clock/qcom,qcs615-gpucc.h39
-rw-r--r--include/dt-bindings/clock/qcom,qcs615-videocc.h30
-rw-r--r--include/dt-bindings/clock/qcom,x1e80100-gcc.h2
-rw-r--r--include/dt-bindings/clock/r9a07g043-cpg.h53
-rw-r--r--include/dt-bindings/clock/r9a07g044-cpg.h58
-rw-r--r--include/dt-bindings/clock/r9a07g054-cpg.h58
-rw-r--r--include/dt-bindings/clock/r9a08g045-cpg.h71
-rw-r--r--include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h29
-rw-r--r--include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h29
-rw-r--r--include/dt-bindings/clock/samsung,exynosautov920.h9
-rw-r--r--include/dt-bindings/power/qcom-rpmpd.h1
-rw-r--r--include/kvm/arm_vgic.h11
-rw-r--r--include/linux/acpi.h2
-rw-r--r--include/linux/adi-axi-common.h56
-rw-r--r--include/linux/alloc_tag.h6
-rw-r--r--include/linux/amd-iommu.h25
-rw-r--r--include/linux/avf/virtchnl.h23
-rw-r--r--include/linux/balloon_compaction.h90
-rw-r--r--include/linux/bcm47xx_nvram.h1
-rw-r--r--include/linux/bcm47xx_sprom.h2
-rw-r--r--include/linux/bitfield.h8
-rw-r--r--include/linux/bits.h29
-rw-r--r--include/linux/bnxt/hsi.h10914
-rw-r--r--include/linux/bpf-cgroup-defs.h1
-rw-r--r--include/linux/bpf-cgroup.h1
-rw-r--r--include/linux/bpf.h125
-rw-r--r--include/linux/bpf_verifier.h81
-rw-r--r--include/linux/brcmphy.h6
-rw-r--r--include/linux/btf.h3
-rw-r--r--include/linux/can/bittiming.h2
-rw-r--r--include/linux/can/dev.h4
-rw-r--r--include/linux/cgroup-defs.h21
-rw-r--r--include/linux/clk-provider.h26
-rw-r--r--include/linux/codetag.h1
-rw-r--r--include/linux/compiler-clang.h3
-rw-r--r--include/linux/compiler_types.h4
-rw-r--r--include/linux/coredump.h2
-rw-r--r--include/linux/cpu.h26
-rw-r--r--include/linux/cpumask.h40
-rw-r--r--include/linux/crypto.h3
-rw-r--r--include/linux/damon.h80
-rw-r--r--include/linux/dax.h9
-rw-r--r--include/linux/device-mapper.h2
-rw-r--r--include/linux/dma-fence.h45
-rw-r--r--include/linux/dmapool.h8
-rw-r--r--include/linux/dpll.h21
-rw-r--r--include/linux/ethtool.h29
-rw-r--r--include/linux/ethtool_netlink.h7
-rw-r--r--include/linux/filter.h18
-rw-r--r--include/linux/find.h29
-rw-r--r--include/linux/fortify-string.h2
-rw-r--r--include/linux/fpga/adi-axi-common.h23
-rw-r--r--include/linux/fprobe.h5
-rw-r--r--include/linux/fs.h25
-rw-r--r--include/linux/fsnotify.h35
-rw-r--r--include/linux/ftrace.h2
-rw-r--r--include/linux/gfp.h7
-rw-r--r--include/linux/hid.h8
-rw-r--r--include/linux/highmem-internal.h2
-rw-r--r--include/linux/highmem.h12
-rw-r--r--include/linux/hisi_acc_qm.h4
-rw-r--r--include/linux/huge_mm.h52
-rw-r--r--include/linux/hugetlb.h20
-rw-r--r--include/linux/ieee80211.h53
-rw-r--r--include/linux/if_team.h3
-rw-r--r--include/linux/if_tun.h5
-rw-r--r--include/linux/if_vlan.h23
-rw-r--r--include/linux/in6.h7
-rw-r--r--include/linux/init.h2
-rw-r--r--include/linux/intel_dg_nvm_aux.h32
-rw-r--r--include/linux/iommu.h76
-rw-r--r--include/linux/iommufd.h196
-rw-r--r--include/linux/ipv6.h2
-rw-r--r--include/linux/irq-entry-common.h16
-rw-r--r--include/linux/irqbypass.h46
-rw-r--r--include/linux/irqchip/arm-gic-v4.h2
-rw-r--r--include/linux/irqchip/arm-gic-v5.h394
-rw-r--r--include/linux/irqchip/arm-vgic-info.h4
-rw-r--r--include/linux/irqdomain.h3
-rw-r--r--include/linux/kernel.h6
-rw-r--r--include/linux/khugepaged.h4
-rw-r--r--include/linux/kmod.h3
-rw-r--r--include/linux/ksm.h12
-rw-r--r--include/linux/kvm_dirty_ring.h18
-rw-r--r--include/linux/kvm_host.h36
-rw-r--r--include/linux/kvm_irqfd.h5
-rw-r--r--include/linux/led-class-flash.h2
-rw-r--r--include/linux/leds.h1
-rw-r--r--include/linux/libnvdimm.h15
-rw-r--r--include/linux/llist.h6
-rw-r--r--include/linux/local_lock.h20
-rw-r--r--include/linux/local_lock_internal.h30
-rw-r--r--include/linux/lockdep_types.h2
-rw-r--r--include/linux/maple_tree.h4
-rw-r--r--include/linux/mdio.h1
-rw-r--r--include/linux/memcontrol.h44
-rw-r--r--include/linux/memfd.h4
-rw-r--r--include/linux/memory-tiers.h2
-rw-r--r--include/linux/memory.h20
-rw-r--r--include/linux/memory_hotplug.h3
-rw-r--r--include/linux/mfd/davinci_voicecodec.h8
-rw-r--r--include/linux/mfd/madera/pdata.h3
-rw-r--r--include/linux/mfd/pcf50633/core.h229
-rw-r--r--include/linux/mfd/rk808.h2
-rw-r--r--include/linux/mfd/syscon/atmel-smc.h8
-rw-r--r--include/linux/mfd/tps65219.h5
-rw-r--r--include/linux/mfd/twl.h21
-rw-r--r--include/linux/mfd/wm8350/core.h10
-rw-r--r--include/linux/migrate.h46
-rw-r--r--include/linux/mlx5/device.h1
-rw-r--r--include/linux/mlx5/driver.h25
-rw-r--r--include/linux/mlx5/fs.h2
-rw-r--r--include/linux/mlx5/mlx5_ifc.h150
-rw-r--r--include/linux/mm.h85
-rw-r--r--include/linux/mm_types.h3
-rw-r--r--include/linux/mman.h4
-rw-r--r--include/linux/mmap_lock.h11
-rw-r--r--include/linux/mmc/sdio_ids.h1
-rw-r--r--include/linux/mmdebug.h12
-rw-r--r--include/linux/mmzone.h36
-rw-r--r--include/linux/module.h7
-rw-r--r--include/linux/mroute6.h7
-rw-r--r--include/linux/msi.h3
-rw-r--r--include/linux/mtd/map.h13
-rw-r--r--include/linux/mtd/spinand.h70
-rw-r--r--include/linux/mtd/ubi.h1
-rw-r--r--include/linux/mutex.h11
-rw-r--r--include/linux/net/intel/iidc_rdma_idpf.h55
-rw-r--r--include/linux/net/intel/libie/adminq.h308
-rw-r--r--include/linux/net/intel/libie/pctype.h41
-rw-r--r--include/linux/netdevice.h54
-rw-r--r--include/linux/netfilter.h1
-rw-r--r--include/linux/netfilter/nf_conntrack_dccp.h38
-rw-r--r--include/linux/netfilter/x_tables.h10
-rw-r--r--include/linux/netpoll.h13
-rw-r--r--include/linux/nmi.h2
-rw-r--r--include/linux/node.h77
-rw-r--r--include/linux/nodemask.h18
-rw-r--r--include/linux/of_irq.h5
-rw-r--r--include/linux/packing.h6
-rw-r--r--include/linux/padata.h4
-rw-r--r--include/linux/page-flags.h106
-rw-r--r--include/linux/page-isolation.h47
-rw-r--r--include/linux/page_owner.h8
-rw-r--r--include/linux/pageblock-flags.h56
-rw-r--r--include/linux/pagemap.h14
-rw-r--r--include/linux/pagewalk.h9
-rw-r--r--include/linux/panic.h5
-rw-r--r--include/linux/pci-tph.h1
-rw-r--r--include/linux/percpu-defs.h7
-rw-r--r--include/linux/perf/arm_pmu.h8
-rw-r--r--include/linux/pfn.h9
-rw-r--r--include/linux/pfn_t.h131
-rw-r--r--include/linux/pgtable.h118
-rw-r--r--include/linux/phy.h49
-rw-r--r--include/linux/platform_data/emc2305.h6
-rw-r--r--include/linux/platform_data/microchip-ksz.h1
-rw-r--r--include/linux/platform_data/video-pxafb.h1
-rw-r--r--include/linux/power_supply.h16
-rw-r--r--include/linux/preempt.h9
-rw-r--r--include/linux/printk.h7
-rw-r--r--include/linux/proc_fs.h1
-rw-r--r--include/linux/pse-pd/pse.h114
-rw-r--r--include/linux/psi_types.h6
-rw-r--r--include/linux/ptp_clock_kernel.h34
-rw-r--r--include/linux/ref_tracker.h50
-rw-r--r--include/linux/ring_buffer.h4
-rw-r--r--include/linux/rmap.h4
-rw-r--r--include/linux/rtmutex.h2
-rw-r--r--include/linux/rv.h86
-rw-r--r--include/linux/sched.h163
-rw-r--r--include/linux/sched/deadline.h4
-rw-r--r--include/linux/sched/ext.h23
-rw-r--r--include/linux/sched/idle.h4
-rw-r--r--include/linux/sched/nohz.h4
-rw-r--r--include/linux/sched/sd_flags.h8
-rw-r--r--include/linux/sched/task.h31
-rw-r--r--include/linux/sched/topology.h39
-rw-r--r--include/linux/shmem_fs.h5
-rw-r--r--include/linux/skbuff.h22
-rw-r--r--include/linux/soc/marvell/silicons.h25
-rw-r--r--include/linux/soc/qcom/ubwc.h75
-rw-r--r--include/linux/spi/spi-mem.h2
-rw-r--r--include/linux/srcu.h54
-rw-r--r--include/linux/srcutiny.h3
-rw-r--r--include/linux/srcutree.h38
-rw-r--r--include/linux/stop_machine.h64
-rw-r--r--include/linux/suspend.h2
-rw-r--r--include/linux/swap.h23
-rw-r--r--include/linux/sysctl.h5
-rw-r--r--include/linux/sysfb.h6
-rw-r--r--include/linux/tcp.h4
-rw-r--r--include/linux/tnum.h2
-rw-r--r--include/linux/uaccess.h2
-rw-r--r--include/linux/usb/cdc_ncm.h1
-rw-r--r--include/linux/usb/usbnet.h3
-rw-r--r--include/linux/usb/uvc.h3
-rw-r--r--include/linux/userfaultfd_k.h15
-rw-r--r--include/linux/usermode_driver.h19
-rw-r--r--include/linux/virtio.h9
-rw-r--r--include/linux/virtio_config.h43
-rw-r--r--include/linux/virtio_features.h88
-rw-r--r--include/linux/virtio_net.h197
-rw-r--r--include/linux/virtio_pci_modern.h43
-rw-r--r--include/linux/vmstat.h4
-rw-r--r--include/linux/wait.h2
-rw-r--r--include/linux/workqueue.h34
-rw-r--r--include/linux/writeback.h11
-rw-r--r--include/linux/zsmalloc.h2
-rw-r--r--include/media/rcar-fcp.h5
-rw-r--r--include/media/v4l2-ctrls.h4
-rw-r--r--include/media/v4l2-dev.h14
-rw-r--r--include/media/v4l2-ioctl.h1
-rw-r--r--include/media/v4l2-jpeg.h9
-rw-r--r--include/media/v4l2-subdev.h3
-rw-r--r--include/media/vsp1.h89
-rw-r--r--include/net/act_api.h25
-rw-r--r--include/net/af_unix.h2
-rw-r--r--include/net/aligned_data.h22
-rw-r--r--include/net/bluetooth/bluetooth.h11
-rw-r--r--include/net/bluetooth/hci.h10
-rw-r--r--include/net/bluetooth/hci_core.h41
-rw-r--r--include/net/bond_options.h1
-rw-r--r--include/net/bonding.h3
-rw-r--r--include/net/cfg80211.h221
-rw-r--r--include/net/devlink.h18
-rw-r--r--include/net/dropreason-core.h39
-rw-r--r--include/net/dsa.h2
-rw-r--r--include/net/dst.h38
-rw-r--r--include/net/gro.h6
-rw-r--r--include/net/inet6_hashtables.h2
-rw-r--r--include/net/inet_hashtables.h8
-rw-r--r--include/net/ip.h15
-rw-r--r--include/net/ip6_route.h4
-rw-r--r--include/net/ip6_tunnel.h5
-rw-r--r--include/net/ip_tunnels.h2
-rw-r--r--include/net/libeth/rx.h28
-rw-r--r--include/net/libeth/tx.h36
-rw-r--r--include/net/libeth/types.h106
-rw-r--r--include/net/libeth/xdp.h1879
-rw-r--r--include/net/libeth/xsk.h685
-rw-r--r--include/net/lwtunnel.h8
-rw-r--r--include/net/mac80211.h69
-rw-r--r--include/net/mana/gdma.h27
-rw-r--r--include/net/mana/mana.h173
-rw-r--r--include/net/mctp.h57
-rw-r--r--include/net/ndisc.h9
-rw-r--r--include/net/neighbour.h22
-rw-r--r--include/net/netdev_queues.h9
-rw-r--r--include/net/netfilter/ipv4/nf_conntrack_ipv4.h3
-rw-r--r--include/net/netfilter/nf_conntrack.h2
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h13
-rw-r--r--include/net/netfilter/nf_log.h3
-rw-r--r--include/net/netfilter/nf_reject.h1
-rw-r--r--include/net/netfilter/nf_tables.h19
-rw-r--r--include/net/netfilter/nf_tables_core.h50
-rw-r--r--include/net/netlink.h14
-rw-r--r--include/net/netmem.h181
-rw-r--r--include/net/netns/conntrack.h13
-rw-r--r--include/net/netns/mctp.h20
-rw-r--r--include/net/page_pool/helpers.h14
-rw-r--r--include/net/pfcp.h2
-rw-r--r--include/net/request_sock.h4
-rw-r--r--include/net/route.h6
-rw-r--r--include/net/sctp/structs.h2
-rw-r--r--include/net/sock.h19
-rw-r--r--include/net/tc_act/tc_connmark.h1
-rw-r--r--include/net/tc_act/tc_csum.h10
-rw-r--r--include/net/tc_act/tc_ct.h11
-rw-r--r--include/net/tc_act/tc_ctinfo.h7
-rw-r--r--include/net/tc_act/tc_gate.h9
-rw-r--r--include/net/tc_act/tc_mpls.h10
-rw-r--r--include/net/tc_act/tc_nat.h1
-rw-r--r--include/net/tc_act/tc_pedit.h1
-rw-r--r--include/net/tc_act/tc_police.h12
-rw-r--r--include/net/tc_act/tc_sample.h9
-rw-r--r--include/net/tc_act/tc_skbedit.h1
-rw-r--r--include/net/tc_act/tc_vlan.h9
-rw-r--r--include/net/tcp.h11
-rw-r--r--include/net/tcx.h1
-rw-r--r--include/net/udp.h1
-rw-r--r--include/net/udp_tunnel.h103
-rw-r--r--include/net/vxlan.h5
-rw-r--r--include/net/x25.h1
-rw-r--r--include/net/xdp_sock.h1
-rw-r--r--include/ras/ras_event.h2
-rw-r--r--include/rdma/ib_umem.h25
-rw-r--r--include/rdma/ib_verbs.h65
-rw-r--r--include/rdma/restrack.h4
-rw-r--r--include/rv/da_monitor.h172
-rw-r--r--include/rv/ltl_monitor.h186
-rw-r--r--include/scsi/scsi_device.h5
-rw-r--r--include/scsi/scsi_transport_fc.h5
-rw-r--r--include/soc/spacemit/k1-syscon.h160
-rw-r--r--include/trace/events/alarmtimer.h2
-rw-r--r--include/trace/events/btrfs.h7
-rw-r--r--include/trace/events/cgroup.h47
-rw-r--r--include/trace/events/damon.h41
-rw-r--r--include/trace/events/dma_fence.h38
-rw-r--r--include/trace/events/ext4.h50
-rw-r--r--include/trace/events/fs_dax.h6
-rw-r--r--include/trace/events/ipi.h58
-rw-r--r--include/trace/events/kmem.h38
-rw-r--r--include/trace/events/kvm.h111
-rw-r--r--include/trace/events/mmap.h52
-rw-r--r--include/trace/events/power.h28
-rw-r--r--include/trace/events/sched.h14
-rw-r--r--include/trace/events/scsi.h13
-rw-r--r--include/trace/events/tcp.h29
-rw-r--r--include/trace/events/thp.h2
-rw-r--r--include/trace/events/writeback.h8
-rw-r--r--include/trace/events/xdp.h21
-rw-r--r--include/uapi/asm-generic/socket.h3
-rw-r--r--include/uapi/drm/amdgpu_drm.h2
-rw-r--r--include/uapi/drm/drm_fourcc.h56
-rw-r--r--include/uapi/drm/ivpu_accel.h14
-rw-r--r--include/uapi/drm/msm_drm.h149
-rw-r--r--include/uapi/drm/panfrost_drm.h21
-rw-r--r--include/uapi/drm/panthor_drm.h41
-rw-r--r--include/uapi/drm/xe_drm.h12
-rw-r--r--include/uapi/linux/bpf.h45
-rw-r--r--include/uapi/linux/capability.h5
-rw-r--r--include/uapi/linux/devlink.h16
-rw-r--r--include/uapi/linux/dpll.h13
-rw-r--r--include/uapi/linux/ethtool.h4
-rw-r--r--include/uapi/linux/ethtool_netlink.h2
-rw-r--r--include/uapi/linux/ethtool_netlink_generated.h83
-rw-r--r--include/uapi/linux/handshake.h1
-rw-r--r--include/uapi/linux/if_link.h2
-rw-r--r--include/uapi/linux/if_tun.h9
-rw-r--r--include/uapi/linux/if_xdp.h1
-rw-r--r--include/uapi/linux/in6.h4
-rw-r--r--include/uapi/linux/iommufd.h154
-rw-r--r--include/uapi/linux/ipv6.h1
-rw-r--r--include/uapi/linux/kvm.h2
-rw-r--r--include/uapi/linux/mctp.h8
-rw-r--r--include/uapi/linux/media/raspberrypi/pisp_be_config.h9
-rw-r--r--include/uapi/linux/neighbour.h5
-rw-r--r--include/uapi/linux/net_dropmon.h7
-rw-r--r--include/uapi/linux/netconf.h1
-rw-r--r--include/uapi/linux/netdev.h6
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_hook.h2
-rw-r--r--include/uapi/linux/nl80211.h61
-rw-r--r--include/uapi/linux/openvswitch.h6
-rw-r--r--include/uapi/linux/pkt_sched.h68
-rw-r--r--include/uapi/linux/prctl.h2
-rw-r--r--include/uapi/linux/rkisp1-config.h106
-rw-r--r--include/uapi/linux/snmp.h1
-rw-r--r--include/uapi/linux/sysctl.h1
-rw-r--r--include/uapi/linux/v4l2-controls.h6
-rw-r--r--include/uapi/linux/vhost.h7
-rw-r--r--include/uapi/linux/vhost_types.h5
-rw-r--r--include/uapi/linux/videodev2.h9
-rw-r--r--include/uapi/linux/virtio_net.h33
-rw-r--r--include/uapi/rdma/efa-abi.h3
-rw-r--r--include/uapi/rdma/ib_user_ioctl_cmds.h36
-rw-r--r--include/ufs/ufs.h26
-rw-r--r--include/ufs/ufshcd.h1
-rw-r--r--include/video/edid.h3
-rw-r--r--include/video/sisfb.h6
-rw-r--r--include/xen/xen-ops.h2
-rw-r--r--include/xen/xenbus.h2
414 files changed, 21949 insertions, 3367 deletions
diff --git a/include/asm-generic/codetag.lds.h b/include/asm-generic/codetag.lds.h
index 372c320c5043..a14f4bdafdda 100644
--- a/include/asm-generic/codetag.lds.h
+++ b/include/asm-generic/codetag.lds.h
@@ -2,6 +2,12 @@
#ifndef __ASM_GENERIC_CODETAG_LDS_H
#define __ASM_GENERIC_CODETAG_LDS_H
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+#define IF_MEM_ALLOC_PROFILING(...) __VA_ARGS__
+#else
+#define IF_MEM_ALLOC_PROFILING(...)
+#endif
+
#define SECTION_WITH_BOUNDARIES(_name) \
. = ALIGN(8); \
__start_##_name = .; \
@@ -9,13 +15,7 @@
__stop_##_name = .;
#define CODETAG_SECTIONS() \
- SECTION_WITH_BOUNDARIES(alloc_tags)
-
-/*
- * Module codetags which aren't used after module unload, therefore have the
- * same lifespan as the module and can be safely unloaded with the module.
- */
-#define MOD_CODETAG_SECTIONS()
+ IF_MEM_ALLOC_PROFILING(SECTION_WITH_BOUNDARIES(alloc_tags))
#define MOD_SEPARATE_CODETAG_SECTION(_name) \
.codetag.##_name : { \
@@ -28,6 +28,6 @@
* unload them individually once unused.
*/
#define MOD_SEPARATE_CODETAG_SECTIONS() \
- MOD_SEPARATE_CODETAG_SECTION(alloc_tags)
+ IF_MEM_ALLOC_PROFILING(MOD_SEPARATE_CODETAG_SECTION(alloc_tags))
#endif /* __ASM_GENERIC_CODETAG_LDS_H */
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index 3e0a8fe9b108..dcb8727f2b82 100644
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -66,15 +66,6 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
}
#endif
-#ifndef __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
-static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
- unsigned long addr, unsigned long end,
- unsigned long floor, unsigned long ceiling)
-{
- free_pgd_range(tlb, addr, end, floor, ceiling);
-}
-#endif
-
#ifndef __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, unsigned long sz)
@@ -114,14 +105,6 @@ static inline int huge_pte_none_mostly(pte_t pte)
}
#endif
-#ifndef __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
-static inline int prepare_hugepage_range(struct file *file,
- unsigned long addr, unsigned long len)
-{
- return 0;
-}
-#endif
-
#ifndef __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
diff --git a/include/asm-generic/msi.h b/include/asm-generic/msi.h
index 124c734ca5d9..92cca4b23f13 100644
--- a/include/asm-generic/msi.h
+++ b/include/asm-generic/msi.h
@@ -33,6 +33,7 @@ typedef struct msi_alloc_info {
/* Device generating MSIs is proxying for another device */
#define MSI_ALLOC_FLAGS_PROXY_DEVICE (1UL << 0)
+#define MSI_ALLOC_FLAGS_FIXED_MSG_DATA (1UL << 1)
#define GENERIC_MSI_DOMAIN_OPS 1
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index fa5f19b8d53a..ae2d2359b79e 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -167,7 +167,7 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)
#define FTRACE_STUB_HACK
#endif
-#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+#ifdef CONFIG_DYNAMIC_FTRACE
/*
* The ftrace call sites are logged to a section whose name depends on the
* compiler option used. A given kernel image will only use one, AKA
diff --git a/include/crypto/engine.h b/include/crypto/engine.h
index 545dbefe3e13..2e60344437da 100644
--- a/include/crypto/engine.h
+++ b/include/crypto/engine.h
@@ -76,7 +76,6 @@ int crypto_engine_stop(struct crypto_engine *engine);
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt);
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
bool retry_support,
- int (*cbk_do_batch)(struct crypto_engine *engine),
bool rt, int qlen);
void crypto_engine_exit(struct crypto_engine *engine);
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index ffffd88bbbad..2d97440028ff 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -63,10 +63,7 @@ struct crypto_acomp_stream {
struct crypto_acomp_streams {
/* These must come first because of struct scomp_alg. */
void *(*alloc_ctx)(void);
- union {
- void (*free_ctx)(void *);
- void (*cfree_ctx)(const void *);
- };
+ void (*free_ctx)(void *);
struct crypto_acomp_stream __percpu *streams;
struct work_struct stream_work;
diff --git a/include/crypto/internal/engine.h b/include/crypto/internal/engine.h
index b6a4ea2240fc..f19ef376833f 100644
--- a/include/crypto/internal/engine.h
+++ b/include/crypto/internal/engine.h
@@ -21,7 +21,6 @@ struct device;
/*
* struct crypto_engine - crypto hardware engine
* @name: the engine name
- * @idling: the engine is entering idle state
* @busy: request pump is busy
* @running: the engine is on working
* @retry_support: indication that the hardware allows re-execution
@@ -31,14 +30,6 @@ struct device;
* @list: link with the global crypto engine list
* @queue_lock: spinlock to synchronise access to request queue
* @queue: the crypto queue of the engine
- * @prepare_crypt_hardware: a request will soon arrive from the queue
- * so the subsystem requests the driver to prepare the hardware
- * by issuing this call
- * @unprepare_crypt_hardware: there are currently no more requests on the
- * queue so the subsystem notifies the driver that it may relax the
- * hardware by issuing this call
- * @do_batch_requests: execute a batch of requests. Depends on multiple
- * requests support.
* @kworker: kthread worker struct for request pump
* @pump_requests: work struct for scheduling work to the request pump
* @priv_data: the engine private data
@@ -46,7 +37,6 @@ struct device;
*/
struct crypto_engine {
char name[ENGINE_NAME_LEN];
- bool idling;
bool busy;
bool running;
@@ -58,11 +48,6 @@ struct crypto_engine {
struct crypto_queue queue;
struct device *dev;
- int (*prepare_crypt_hardware)(struct crypto_engine *engine);
- int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
- int (*do_batch_requests)(struct crypto_engine *engine);
-
-
struct kthread_worker *kworker;
struct kthread_work pump_requests;
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 0f85c543f80b..6ec5f2f37ccb 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -30,6 +30,20 @@
__##name##_req, (req))
struct ahash_request;
+struct scatterlist;
+
+struct crypto_hash_walk {
+ const char *data;
+
+ unsigned int offset;
+ unsigned int flags;
+
+ struct page *pg;
+ unsigned int entrylen;
+
+ unsigned int total;
+ struct scatterlist *sg;
+};
struct ahash_instance {
void (*free)(struct ahash_instance *inst);
@@ -61,6 +75,15 @@ struct crypto_shash_spawn {
struct crypto_spawn base;
};
+int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err);
+int crypto_hash_walk_first(struct ahash_request *req,
+ struct crypto_hash_walk *walk);
+
+static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
+{
+ return !(walk->entrylen | walk->total);
+}
+
int crypto_register_ahash(struct ahash_alg *alg);
void crypto_unregister_ahash(struct ahash_alg *alg);
int crypto_register_ahashes(struct ahash_alg *algs, int count);
@@ -91,6 +114,12 @@ static inline bool crypto_hash_alg_needs_key(struct hash_alg_common *alg)
!(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY);
}
+static inline bool crypto_hash_no_export_core(struct crypto_ahash *tfm)
+{
+ return crypto_hash_alg_common(tfm)->base.cra_flags &
+ CRYPTO_AHASH_ALG_NO_EXPORT_CORE;
+}
+
int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
struct crypto_instance *inst,
const char *name, u32 type, u32 mask);
@@ -167,6 +196,13 @@ static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm,
tfm->reqsize = reqsize;
}
+static inline bool crypto_ahash_tested(struct crypto_ahash *tfm)
+{
+ struct crypto_tfm *tfm_base = crypto_ahash_tfm(tfm);
+
+ return tfm_base->__crt_alg->cra_flags & CRYPTO_ALG_TESTED;
+}
+
static inline void crypto_ahash_set_reqsize_dma(struct crypto_ahash *ahash,
unsigned int reqsize)
{
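The crypto_hash_walk API moved into this header is normally consumed by ahash implementations as a scatterlist-walking loop. A minimal sketch of that usual pattern follows, assuming the declarations above; example_ahash_update() and example_hash_block() are illustrative names, not part of this patch:

static int example_ahash_update(struct ahash_request *req)
{
	struct crypto_hash_walk walk;
	int nbytes;

	/*
	 * crypto_hash_walk_first() maps the first scatterlist chunk and
	 * returns the number of bytes available in walk.data (0 when there
	 * is nothing to hash, negative on error).  crypto_hash_walk_done()
	 * consumes the processing result and returns the size of the next
	 * chunk, or 0/negative when the walk is finished.
	 */
	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
	     nbytes = crypto_hash_walk_done(&walk, 0)) {
		/* feed walk.data / nbytes into the hash core */
		example_hash_block(walk.data, nbytes);	/* hypothetical helper */
	}

	return nbytes;	/* 0 on success, negative error code otherwise */
}

crypto_hash_walk_last() can be used inside the loop when the final chunk needs special handling.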
diff --git a/include/drm/amd/isp.h b/include/drm/amd/isp.h
new file mode 100644
index 000000000000..ec868288abf2
--- /dev/null
+++ b/include/drm/amd/isp.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2025 Advanced Micro Devices, Inc. All rights reserved.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+
+#ifndef __ISP_H__
+#define __ISP_H__
+
+#include <linux/types.h>
+
+struct device;
+
+struct isp_platform_data {
+ void *adev;
+ u32 asic_type;
+ resource_size_t base_rmmio_size;
+};
+
+int isp_user_buffer_alloc(struct device *dev, void *dmabuf,
+ void **buf_obj, u64 *buf_addr);
+
+void isp_user_buffer_free(void *buf_obj);
+
+int isp_kernel_buffer_alloc(struct device *dev, u64 size,
+ void **buf_obj, u64 *gpu_addr, void **cpu_addr);
+
+void isp_kernel_buffer_free(void **buf_obj, u64 *gpu_addr, void **cpu_addr);
+
+#endif
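The new AMD ISP header only declares the buffer helpers, so as a hedged sketch of how a client driver might use the kernel-buffer pair (the surrounding function and the use of SZ_4K are illustrative, not taken from this patch):

static int foo_isp_alloc_fw_buffer(struct device *dev)
{
	void *buf_obj;
	u64 gpu_addr;
	void *cpu_addr;
	int ret;

	/* allocate a kernel-owned buffer visible to both the CPU and the ISP */
	ret = isp_kernel_buffer_alloc(dev, SZ_4K, &buf_obj, &gpu_addr, &cpu_addr);
	if (ret)
		return ret;

	/* program the ISP with gpu_addr, access the contents through cpu_addr */

	isp_kernel_buffer_free(&buf_obj, &gpu_addr, &cpu_addr);
	return 0;
}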
diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
index 3001c0b6e7bb..811e9238a77c 100644
--- a/include/drm/display/drm_dp.h
+++ b/include/drm/display/drm_dp.h
@@ -547,16 +547,28 @@
/* DFP Capability Extension */
#define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0a3 /* 2.0 */
-#define DP_PANEL_REPLAY_CAP 0x0b0 /* DP 2.0 */
+#define DP_PANEL_REPLAY_CAP_SUPPORT 0x0b0 /* DP 2.0 */
# define DP_PANEL_REPLAY_SUPPORT (1 << 0)
# define DP_PANEL_REPLAY_SU_SUPPORT (1 << 1)
# define DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT (1 << 2) /* eDP 1.5 */
-#define DP_PANEL_PANEL_REPLAY_CAPABILITY 0xb1
-# define DP_PANEL_PANEL_REPLAY_SU_GRANULARITY_REQUIRED (1 << 5)
-
-#define DP_PANEL_PANEL_REPLAY_X_GRANULARITY 0xb2
-#define DP_PANEL_PANEL_REPLAY_Y_GRANULARITY 0xb4
+#define DP_PANEL_REPLAY_CAP_SIZE 7
+
+#define DP_PANEL_REPLAY_CAP_CAPABILITY 0xb1
+# define DP_PANEL_REPLAY_DSC_DECODE_CAPABILITY_IN_PR_SHIFT 1 /* DP 2.1a */
+# define DP_PANEL_REPLAY_DSC_DECODE_CAPABILITY_IN_PR_MASK (3 << DP_PANEL_REPLAY_DSC_DECODE_CAPABILITY_IN_PR_SHIFT)
+# define DP_DSC_DECODE_CAPABILITY_IN_PR_SUPPORTED 0x00
+# define DP_DSC_DECODE_CAPABILITY_IN_PR_FULL_FRAME_ONLY 0x01
+# define DP_DSC_DECODE_CAPABILITY_IN_PR_NOT_SUPPORTED 0x02
+# define DP_DSC_DECODE_CAPABILITY_IN_PR_RESERVED 0x03
+# define DP_PANEL_REPLAY_ASYNC_VIDEO_TIMING_NOT_SUPPORTED_IN_PR (1 << 3)
+# define DP_PANEL_REPLAY_DSC_CRC_OF_MULTIPLE_SUS_SUPPORTED (1 << 4)
+# define DP_PANEL_REPLAY_SU_GRANULARITY_REQUIRED (1 << 5)
+# define DP_PANEL_REPLAY_SU_Y_GRANULARITY_EXTENDED_CAPABILITY_SUPPORTED (1 << 6)
+# define DP_PANEL_REPLAY_LINK_OFF_SUPPORTED_IN_PR_AFTER_ADAPTIVE_SYNC_SDP (1 << 7)
+
+#define DP_PANEL_REPLAY_CAP_X_GRANULARITY 0xb2
+#define DP_PANEL_REPLAY_CAP_Y_GRANULARITY 0xb4
/* Link Configuration */
#define DP_LINK_BW_SET 0x100
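With the renamed registers, the whole panel-replay capability block can be fetched in one AUX read starting at DP_PANEL_REPLAY_CAP_SUPPORT (0xb0) and spanning DP_PANEL_REPLAY_CAP_SIZE bytes. The snippet below is an illustrative sketch using drm_dp_dpcd_read() from drm_dp_helper.h, not code from this patch:

static void foo_read_panel_replay_caps(struct drm_dp_aux *aux)
{
	u8 pr_dpcd[DP_PANEL_REPLAY_CAP_SIZE];
	u8 dsc_decode;

	if (drm_dp_dpcd_read(aux, DP_PANEL_REPLAY_CAP_SUPPORT,
			     pr_dpcd, sizeof(pr_dpcd)) < 0)
		return;

	if (!(pr_dpcd[0] & DP_PANEL_REPLAY_SUPPORT))
		return;

	/* byte 1 is DP_PANEL_REPLAY_CAP_CAPABILITY (0xb1) */
	dsc_decode = (pr_dpcd[1] & DP_PANEL_REPLAY_DSC_DECODE_CAPABILITY_IN_PR_MASK) >>
		      DP_PANEL_REPLAY_DSC_DECODE_CAPABILITY_IN_PR_SHIFT;

	/* dsc_decode now holds one of the DP_DSC_DECODE_CAPABILITY_IN_PR_* values */
}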
diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
index e4ca35143ff9..87caa4f1fdb8 100644
--- a/include/drm/display/drm_dp_helper.h
+++ b/include/drm/display/drm_dp_helper.h
@@ -523,10 +523,16 @@ struct drm_dp_aux {
* @no_zero_sized: If the hw can't use zero sized transfers (NVIDIA)
*/
bool no_zero_sized;
+
+ /**
+ * @dpcd_probe_disabled: If probing before a DPCD access is disabled.
+ */
+ bool dpcd_probe_disabled;
};
int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset);
void drm_dp_dpcd_set_powered(struct drm_dp_aux *aux, bool powered);
+void drm_dp_dpcd_set_probe(struct drm_dp_aux *aux, bool enable);
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size);
ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
@@ -837,6 +843,7 @@ drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
* @lsb_reg_used: Do we also write values to the DP_EDP_BACKLIGHT_BRIGHTNESS_LSB register?
* @aux_enable: Does the panel support the AUX enable cap?
* @aux_set: Does the panel support setting the brightness through AUX?
+ * @luminance_set: Does the panel support setting the brightness through AUX using luminance values?
*
* This structure contains various data about an eDP backlight, which can be populated by using
* drm_edp_backlight_init().
@@ -844,21 +851,23 @@ drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
struct drm_edp_backlight_info {
u8 pwmgen_bit_count;
u8 pwm_freq_pre_divider;
- u16 max;
+ u32 max;
bool lsb_reg_used : 1;
bool aux_enable : 1;
bool aux_set : 1;
+ bool luminance_set : 1;
};
int
drm_edp_backlight_init(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl,
+ u32 max_luminance,
u16 driver_pwm_freq_hz, const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE],
- u16 *current_level, u8 *current_mode);
+ u32 *current_level, u8 *current_mode, bool need_luminance);
int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl,
- u16 level);
+ u32 level);
int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl,
- u16 level);
+ u32 level);
int drm_edp_backlight_disable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl);
#if IS_ENABLED(CONFIG_DRM_KMS_HELPER) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
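For reference, a hedged sketch of a caller adapting to the widened drm_edp_backlight_init() prototype; max_luminance and the trailing need_luminance flag are the new parameters, and the function name, PWM frequency and flag plumbing here are placeholders:

static int foo_panel_backlight_setup(struct drm_dp_aux *aux,
				     const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE],
				     u32 max_luminance, bool use_luminance)
{
	struct drm_edp_backlight_info bl;
	u32 current_level;
	u8 current_mode;
	int ret;

	ret = drm_edp_backlight_init(aux, &bl, max_luminance,
				     200 /* driver PWM freq, Hz */, edp_dpcd,
				     &current_level, &current_mode,
				     use_luminance);
	if (ret)
		return ret;

	/* bl.luminance_set tells the driver which units subsequent levels use */
	return drm_edp_backlight_enable(aux, &bl, current_level);
}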
diff --git a/include/drm/display/drm_hdmi_audio_helper.h b/include/drm/display/drm_hdmi_audio_helper.h
index c9a6faef4109..44d910bdc72d 100644
--- a/include/drm/display/drm_hdmi_audio_helper.h
+++ b/include/drm/display/drm_hdmi_audio_helper.h
@@ -14,6 +14,7 @@ int drm_connector_hdmi_audio_init(struct drm_connector *connector,
struct device *hdmi_codec_dev,
const struct drm_connector_hdmi_audio_funcs *funcs,
unsigned int max_i2s_playback_channels,
+ u64 i2s_formats,
bool spdif_playback,
int sound_dai_port);
void drm_connector_hdmi_audio_plugged_notify(struct drm_connector *connector,
diff --git a/include/drm/display/drm_hdmi_cec_helper.h b/include/drm/display/drm_hdmi_cec_helper.h
new file mode 100644
index 000000000000..fd8f4d2f02c1
--- /dev/null
+++ b/include/drm/display/drm_hdmi_cec_helper.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_DISPLAY_HDMI_CEC_HELPER
+#define DRM_DISPLAY_HDMI_CEC_HELPER
+
+#include <linux/types.h>
+
+struct drm_connector;
+
+struct cec_msg;
+struct device;
+
+struct drm_connector_hdmi_cec_funcs {
+ /**
+ * @init: perform hardware-specific initialization before registering the CEC adapter
+ */
+ int (*init)(struct drm_connector *connector);
+
+ /**
+ * @uninit: perform hardware-specific teardown for the CEC adapter
+ */
+ void (*uninit)(struct drm_connector *connector);
+
+ /**
+ * @enable: enable or disable CEC adapter
+ */
+ int (*enable)(struct drm_connector *connector, bool enable);
+
+ /**
+ * @log_addr: set adapter's logical address, can be called multiple
+ * times if adapter supports several LAs
+ */
+ int (*log_addr)(struct drm_connector *connector, u8 logical_addr);
+
+ /**
+ * @transmit: start transmission of the specified CEC message
+ */
+ int (*transmit)(struct drm_connector *connector, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg);
+};
+
+int drmm_connector_hdmi_cec_register(struct drm_connector *connector,
+ const struct drm_connector_hdmi_cec_funcs *funcs,
+ const char *name,
+ u8 available_las,
+ struct device *dev);
+
+void drm_connector_hdmi_cec_received_msg(struct drm_connector *connector,
+ struct cec_msg *msg);
+
+void drm_connector_hdmi_cec_transmit_done(struct drm_connector *connector,
+ u8 status,
+ u8 arb_lost_cnt, u8 nack_cnt,
+ u8 low_drive_cnt, u8 error_cnt);
+
+void drm_connector_hdmi_cec_transmit_attempt_done(struct drm_connector *connector,
+ u8 status);
+
+#if IS_ENABLED(CONFIG_DRM_DISPLAY_HDMI_CEC_NOTIFIER_HELPER)
+int drmm_connector_hdmi_cec_notifier_register(struct drm_connector *connector,
+ const char *port_name,
+ struct device *dev);
+#else
+static inline int drmm_connector_hdmi_cec_notifier_register(struct drm_connector *connector,
+ const char *port_name,
+ struct device *dev)
+{
+ return 0;
+}
+#endif
+
+#endif
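A hedged sketch of how a connector driver might hook into this new CEC helper: implement the ops, register the adapter through the DRM-managed helper, and forward hardware events from its interrupt path. All foo_* names are illustrative; CEC_MAX_LOG_ADDRS comes from <media/cec.h>:

/* driver-provided hooks; bodies omitted, shapes follow drm_connector_hdmi_cec_funcs */
static int foo_cec_enable(struct drm_connector *connector, bool enable);
static int foo_cec_log_addr(struct drm_connector *connector, u8 logical_addr);
static int foo_cec_transmit(struct drm_connector *connector, u8 attempts,
			    u32 signal_free_time, struct cec_msg *msg);

static const struct drm_connector_hdmi_cec_funcs foo_cec_funcs = {
	.enable   = foo_cec_enable,
	.log_addr = foo_cec_log_addr,
	.transmit = foo_cec_transmit,
};

static int foo_connector_init_cec(struct drm_connector *connector,
				  struct device *dev)
{
	return drmm_connector_hdmi_cec_register(connector, &foo_cec_funcs,
						"foo-cec", CEC_MAX_LOG_ADDRS,
						dev);
}

/*
 * On RX/TX interrupts the driver then forwards events with
 * drm_connector_hdmi_cec_received_msg() and
 * drm_connector_hdmi_cec_transmit_attempt_done() (or the more detailed
 * drm_connector_hdmi_cec_transmit_done()).
 */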
diff --git a/include/drm/drm_accel.h b/include/drm/drm_accel.h
index 038ccb02f9a3..20a665ec6f16 100644
--- a/include/drm/drm_accel.h
+++ b/include/drm/drm_accel.h
@@ -58,7 +58,6 @@ void accel_core_exit(void);
int accel_core_init(void);
void accel_set_device_instance_params(struct device *kdev, int index);
int accel_open(struct inode *inode, struct file *filp);
-void accel_debugfs_init(struct drm_device *dev);
void accel_debugfs_register(struct drm_device *dev);
#else
@@ -77,10 +76,6 @@ static inline void accel_set_device_instance_params(struct device *kdev, int ind
{
}
-static inline void accel_debugfs_init(struct drm_device *dev)
-{
-}
-
static inline void accel_debugfs_register(struct drm_device *dev)
{
}
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 4e418a29a9ff..8ed80cad77ec 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -32,6 +32,7 @@
#include <drm/drm_mode_object.h>
#include <drm/drm_modes.h>
+struct cec_msg;
struct device_node;
struct drm_bridge;
@@ -77,6 +78,16 @@ struct drm_bridge_funcs {
enum drm_bridge_attach_flags flags);
/**
+ * @destroy:
+ *
+ * This callback is invoked when the bridge is about to be
+ * deallocated.
+ *
+ * The @destroy callback is optional.
+ */
+ void (*destroy)(struct drm_bridge *bridge);
+
+ /**
* @detach:
*
* This callback is invoked whenever our bridge is being detached from a
@@ -164,17 +175,33 @@ struct drm_bridge_funcs {
/**
* @disable:
*
- * This callback should disable the bridge. It is called right before
- * the preceding element in the display pipe is disabled. If the
- * preceding element is a bridge this means it's called before that
- * bridge's @disable vfunc. If the preceding element is a &drm_encoder
- * it's called right before the &drm_encoder_helper_funcs.disable,
- * &drm_encoder_helper_funcs.prepare or &drm_encoder_helper_funcs.dpms
- * hook.
+ * The @disable callback should disable the bridge.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is still running when this callback is called.
*
+ *
+ * If the preceding element is a &drm_bridge, then this is called before
+ * that bridge is disabled via one of:
+ *
+ * - &drm_bridge_funcs.disable
+ * - &drm_bridge_funcs.atomic_disable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called before the encoder is disabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_disable
+ * - &drm_encoder_helper_funcs.prepare
+ * - &drm_encoder_helper_funcs.disable
+ * - &drm_encoder_helper_funcs.dpms
+ *
+ * and the CRTC is disabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.prepare
+ * - &drm_crtc_helper_funcs.atomic_disable
+ * - &drm_crtc_helper_funcs.disable
+ * - &drm_crtc_helper_funcs.dpms.
+ *
* The @disable callback is optional.
*
* NOTE:
@@ -187,17 +214,34 @@ struct drm_bridge_funcs {
/**
* @post_disable:
*
- * This callback should disable the bridge. It is called right after the
- * preceding element in the display pipe is disabled. If the preceding
- * element is a bridge this means it's called after that bridge's
- * @post_disable function. If the preceding element is a &drm_encoder
- * it's called right after the encoder's
- * &drm_encoder_helper_funcs.disable, &drm_encoder_helper_funcs.prepare
- * or &drm_encoder_helper_funcs.dpms hook.
- *
* The bridge must assume that the display pipe (i.e. clocks and timing
- * signals) feeding it is no longer running when this callback is
- * called.
+ * signals) feeding this bridge is no longer running when the
+ * @post_disable is called.
+ *
+ * This callback should perform all the actions required by the hardware
+ * after it has stopped receiving signals from the preceding element.
+ *
+ * If the preceding element is a &drm_bridge, then this is called after
+ * that bridge is post-disabled (unless marked otherwise by the
+ * @pre_enable_prev_first flag) via one of:
+ *
+ * - &drm_bridge_funcs.post_disable
+ * - &drm_bridge_funcs.atomic_post_disable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called after the encoder is disabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_disable
+ * - &drm_encoder_helper_funcs.prepare
+ * - &drm_encoder_helper_funcs.disable
+ * - &drm_encoder_helper_funcs.dpms
+ *
+ * and the CRTC is disabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.prepare
+ * - &drm_crtc_helper_funcs.atomic_disable
+ * - &drm_crtc_helper_funcs.disable
+ * - &drm_crtc_helper_funcs.dpms
*
* The @post_disable callback is optional.
*
@@ -240,18 +284,30 @@ struct drm_bridge_funcs {
/**
* @pre_enable:
*
- * This callback should enable the bridge. It is called right before
- * the preceding element in the display pipe is enabled. If the
- * preceding element is a bridge this means it's called before that
- * bridge's @pre_enable function. If the preceding element is a
- * &drm_encoder it's called right before the encoder's
- * &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or
- * &drm_encoder_helper_funcs.dpms hook.
- *
* The display pipe (i.e. clocks and timing signals) feeding this bridge
- * will not yet be running when this callback is called. The bridge must
- * not enable the display link feeding the next bridge in the chain (if
- * there is one) when this callback is called.
+ * will not yet be running when the @pre_enable is called.
+ *
+ * This callback should perform all the necessary actions to prepare the
+ * bridge to accept signals from the preceding element.
+ *
+ * If the preceding element is a &drm_bridge, then this is called before
+ * that bridge is pre-enabled (unless marked otherwise by
+ * @pre_enable_prev_first flag) via one of:
+ *
+ * - &drm_bridge_funcs.pre_enable
+ * - &drm_bridge_funcs.atomic_pre_enable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called before the CRTC is enabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.atomic_enable
+ * - &drm_crtc_helper_funcs.commit
+ *
+ * and the encoder is enabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_enable
+ * - &drm_encoder_helper_funcs.enable
+ * - &drm_encoder_helper_funcs.commit
*
* The @pre_enable callback is optional.
*
@@ -265,19 +321,31 @@ struct drm_bridge_funcs {
/**
* @enable:
*
- * This callback should enable the bridge. It is called right after
- * the preceding element in the display pipe is enabled. If the
- * preceding element is a bridge this means it's called after that
- * bridge's @enable function. If the preceding element is a
- * &drm_encoder it's called right after the encoder's
- * &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or
- * &drm_encoder_helper_funcs.dpms hook.
+ * The @enable callback should enable the bridge.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is running when this callback is called. This
* callback must enable the display link feeding the next bridge in the
* chain if there is one.
*
+ * If the preceding element is a &drm_bridge, then this is called after
+ * that bridge is enabled via one of:
+ *
+ * - &drm_bridge_funcs.enable
+ * - &drm_bridge_funcs.atomic_enable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called after the CRTC is enabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.atomic_enable
+ * - &drm_crtc_helper_funcs.commit
+ *
+ * and the encoder is enabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_enable
+ * - &drm_encoder_helper_funcs.enable
+ * - drm_encoder_helper_funcs.commit
+ *
* The @enable callback is optional.
*
* NOTE:
@@ -290,17 +358,30 @@ struct drm_bridge_funcs {
/**
* @atomic_pre_enable:
*
- * This callback should enable the bridge. It is called right before
- * the preceding element in the display pipe is enabled. If the
- * preceding element is a bridge this means it's called before that
- * bridge's @atomic_pre_enable or @pre_enable function. If the preceding
- * element is a &drm_encoder it's called right before the encoder's
- * &drm_encoder_helper_funcs.atomic_enable hook.
- *
* The display pipe (i.e. clocks and timing signals) feeding this bridge
- * will not yet be running when this callback is called. The bridge must
- * not enable the display link feeding the next bridge in the chain (if
- * there is one) when this callback is called.
+ * will not yet be running when the @atomic_pre_enable is called.
+ *
+ * This callback should perform all the necessary actions to prepare the
+ * bridge to accept signals from the preceding element.
+ *
+ * If the preceding element is a &drm_bridge, then this is called before
+ * that bridge is pre-enabled (unless marked otherwise by
+ * @pre_enable_prev_first flag) via one of:
+ *
+ * - &drm_bridge_funcs.pre_enable
+ * - &drm_bridge_funcs.atomic_pre_enable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called before the CRTC is enabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.atomic_enable
+ * - &drm_crtc_helper_funcs.commit
+ *
+ * and the encoder is enabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_enable
+ * - &drm_encoder_helper_funcs.enable
+ * - &drm_encoder_helper_funcs.commit
*
* The @atomic_pre_enable callback is optional.
*/
@@ -310,18 +391,31 @@ struct drm_bridge_funcs {
/**
* @atomic_enable:
*
- * This callback should enable the bridge. It is called right after
- * the preceding element in the display pipe is enabled. If the
- * preceding element is a bridge this means it's called after that
- * bridge's @atomic_enable or @enable function. If the preceding element
- * is a &drm_encoder it's called right after the encoder's
- * &drm_encoder_helper_funcs.atomic_enable hook.
+ * The @atomic_enable callback should enable the bridge.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is running when this callback is called. This
* callback must enable the display link feeding the next bridge in the
* chain if there is one.
*
+ * If the preceding element is a &drm_bridge, then this is called after
+ * that bridge is enabled via one of:
+ *
+ * - &drm_bridge_funcs.enable
+ * - &drm_bridge_funcs.atomic_enable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called after the CRTC is enabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.atomic_enable
+ * - &drm_crtc_helper_funcs.commit
+ *
+ * and the encoder is enabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_enable
+ * - &drm_encoder_helper_funcs.enable
+ * - drm_encoder_helper_funcs.commit
+ *
* The @atomic_enable callback is optional.
*/
void (*atomic_enable)(struct drm_bridge *bridge,
@@ -329,16 +423,32 @@ struct drm_bridge_funcs {
/**
* @atomic_disable:
*
- * This callback should disable the bridge. It is called right before
- * the preceding element in the display pipe is disabled. If the
- * preceding element is a bridge this means it's called before that
- * bridge's @atomic_disable or @disable vfunc. If the preceding element
- * is a &drm_encoder it's called right before the
- * &drm_encoder_helper_funcs.atomic_disable hook.
+ * The @atomic_disable callback should disable the bridge.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is still running when this callback is called.
*
+ * If the preceding element is a &drm_bridge, then this is called before
+ * that bridge is disabled via one of:
+ *
+ * - &drm_bridge_funcs.disable
+ * - &drm_bridge_funcs.atomic_disable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called before the encoder is disabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_disable
+ * - &drm_encoder_helper_funcs.prepare
+ * - &drm_encoder_helper_funcs.disable
+ * - &drm_encoder_helper_funcs.dpms
+ *
+ * and the CRTC is disabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.prepare
+ * - &drm_crtc_helper_funcs.atomic_disable
+ * - &drm_crtc_helper_funcs.disable
+ * - &drm_crtc_helper_funcs.dpms.
+ *
* The @atomic_disable callback is optional.
*/
void (*atomic_disable)(struct drm_bridge *bridge,
@@ -347,16 +457,34 @@ struct drm_bridge_funcs {
/**
* @atomic_post_disable:
*
- * This callback should disable the bridge. It is called right after the
- * preceding element in the display pipe is disabled. If the preceding
- * element is a bridge this means it's called after that bridge's
- * @atomic_post_disable or @post_disable function. If the preceding
- * element is a &drm_encoder it's called right after the encoder's
- * &drm_encoder_helper_funcs.atomic_disable hook.
- *
* The bridge must assume that the display pipe (i.e. clocks and timing
- * signals) feeding it is no longer running when this callback is
- * called.
+ * signals) feeding this bridge is no longer running when the
+ * @atomic_post_disable is called.
+ *
+ * This callback should perform all the actions required by the hardware
+ * after it has stopped receiving signals from the preceding element.
+ *
+ * If the preceding element is a &drm_bridge, then this is called after
+ * that bridge is post-disabled (unless marked otherwise by the
+ * @pre_enable_prev_first flag) via one of:
+ *
+ * - &drm_bridge_funcs.post_disable
+ * - &drm_bridge_funcs.atomic_post_disable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called after the encoder is disabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_disable
+ * - &drm_encoder_helper_funcs.prepare
+ * - &drm_encoder_helper_funcs.disable
+ * - &drm_encoder_helper_funcs.dpms
+ *
+ * and the CRTC is disabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.prepare
+ * - &drm_crtc_helper_funcs.atomic_disable
+ * - &drm_crtc_helper_funcs.disable
+ * - &drm_crtc_helper_funcs.dpms
*
* The @atomic_post_disable callback is optional.
*/
@@ -532,7 +660,8 @@ struct drm_bridge_funcs {
*
* drm_connector_status indicating the bridge output status.
*/
- enum drm_connector_status (*detect)(struct drm_bridge *bridge);
+ enum drm_connector_status (*detect)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
/**
* @get_modes:
@@ -689,8 +818,8 @@ struct drm_bridge_funcs {
* Returns:
* 0 on success, a negative error code otherwise
*/
- int (*hdmi_audio_startup)(struct drm_connector *connector,
- struct drm_bridge *bridge);
+ int (*hdmi_audio_startup)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
/**
* @hdmi_audio_prepare:
@@ -703,8 +832,8 @@ struct drm_bridge_funcs {
* Returns:
* 0 on success, a negative error code otherwise
*/
- int (*hdmi_audio_prepare)(struct drm_connector *connector,
- struct drm_bridge *bridge,
+ int (*hdmi_audio_prepare)(struct drm_bridge *bridge,
+ struct drm_connector *connector,
struct hdmi_codec_daifmt *fmt,
struct hdmi_codec_params *hparms);
@@ -719,8 +848,8 @@ struct drm_bridge_funcs {
* Returns:
* 0 on success, a negative error code otherwise
*/
- void (*hdmi_audio_shutdown)(struct drm_connector *connector,
- struct drm_bridge *bridge);
+ void (*hdmi_audio_shutdown)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
/**
* @hdmi_audio_mute_stream:
@@ -733,10 +862,20 @@ struct drm_bridge_funcs {
* Returns:
* 0 on success, a negative error code otherwise
*/
- int (*hdmi_audio_mute_stream)(struct drm_connector *connector,
- struct drm_bridge *bridge,
+ int (*hdmi_audio_mute_stream)(struct drm_bridge *bridge,
+ struct drm_connector *connector,
bool enable, int direction);
+ int (*hdmi_cec_init)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+
+ int (*hdmi_cec_enable)(struct drm_bridge *bridge, bool enable);
+
+ int (*hdmi_cec_log_addr)(struct drm_bridge *bridge, u8 logical_addr);
+
+ int (*hdmi_cec_transmit)(struct drm_bridge *bridge, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg);
+
/**
* @dp_audio_startup:
*
@@ -748,8 +887,8 @@ struct drm_bridge_funcs {
* Returns:
* 0 on success, a negative error code otherwise
*/
- int (*dp_audio_startup)(struct drm_connector *connector,
- struct drm_bridge *bridge);
+ int (*dp_audio_startup)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
/**
* @dp_audio_prepare:
@@ -762,8 +901,8 @@ struct drm_bridge_funcs {
* Returns:
* 0 on success, a negative error code otherwise
*/
- int (*dp_audio_prepare)(struct drm_connector *connector,
- struct drm_bridge *bridge,
+ int (*dp_audio_prepare)(struct drm_bridge *bridge,
+ struct drm_connector *connector,
struct hdmi_codec_daifmt *fmt,
struct hdmi_codec_params *hparms);
@@ -778,8 +917,8 @@ struct drm_bridge_funcs {
* Returns:
* 0 on success, a negative error code otherwise
*/
- void (*dp_audio_shutdown)(struct drm_connector *connector,
- struct drm_bridge *bridge);
+ void (*dp_audio_shutdown)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
/**
* @dp_audio_mute_stream:
@@ -792,8 +931,8 @@ struct drm_bridge_funcs {
* Returns:
* 0 on success, a negative error code otherwise
*/
- int (*dp_audio_mute_stream)(struct drm_connector *connector,
- struct drm_bridge *bridge,
+ int (*dp_audio_mute_stream)(struct drm_bridge *bridge,
+ struct drm_connector *connector,
bool enable, int direction);
/**
@@ -907,6 +1046,16 @@ enum drm_bridge_ops {
* flag.
*/
DRM_BRIDGE_OP_DP_AUDIO = BIT(6),
+ /**
+ * @DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER: The bridge requires CEC notifier
+ * to be present.
+ */
+ DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER = BIT(7),
+ /**
+ * @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER: The bridge requires CEC adapter
+ * to be present.
+ */
+ DRM_BRIDGE_OP_HDMI_CEC_ADAPTER = BIT(8),
};
/**
@@ -977,21 +1126,6 @@ struct drm_bridge {
* @ddc: Associated I2C adapter for DDC access, if any.
*/
struct i2c_adapter *ddc;
- /** private: */
- /**
- * @hpd_mutex: Protects the @hpd_cb and @hpd_data fields.
- */
- struct mutex hpd_mutex;
- /**
- * @hpd_cb: Hot plug detection callback, registered with
- * drm_bridge_hpd_enable().
- */
- void (*hpd_cb)(void *data, enum drm_connector_status status);
- /**
- * @hpd_data: Private data passed to the Hot plug detection callback
- * @hpd_cb.
- */
- void *hpd_data;
/**
* @vendor: Vendor of the product to be used for the SPD InfoFrame
@@ -1019,6 +1153,12 @@ struct drm_bridge {
unsigned int max_bpc;
/**
+ * @hdmi_cec_dev: device to be used as a containing device for CEC
+ * functions.
+ */
+ struct device *hdmi_cec_dev;
+
+ /**
* @hdmi_audio_dev: device to be used as a parent for the HDMI Codec if
* either of @DRM_BRIDGE_OP_HDMI_AUDIO or @DRM_BRIDGE_OP_DP_AUDIO is set.
*/
@@ -1032,6 +1172,14 @@ struct drm_bridge {
int hdmi_audio_max_i2s_playback_channels;
/**
+ * @hdmi_audio_i2s_formats: supported I2S formats, optional. The
+ * default is to allow all formats supported by the corresponding I2S
+ * bus driver. This is only used for bridges setting
+ * @DRM_BRIDGE_OP_HDMI_AUDIO or @DRM_BRIDGE_OP_DP_AUDIO.
+ */
+ u64 hdmi_audio_i2s_formats;
+
+ /**
* @hdmi_audio_spdif_playback: set if this bridge has S/PDIF playback
* port for @DRM_BRIDGE_OP_HDMI_AUDIO or @DRM_BRIDGE_OP_DP_AUDIO.
*/
@@ -1043,6 +1191,32 @@ struct drm_bridge {
* not used.
*/
int hdmi_audio_dai_port;
+
+ /**
+ * @hdmi_cec_adapter_name: the name of the adapter to register
+ */
+ const char *hdmi_cec_adapter_name;
+
+ /**
+ * @hdmi_cec_available_las: number of logical addresses, CEC_MAX_LOG_ADDRS if unset
+ */
+ u8 hdmi_cec_available_las;
+
+ /** private: */
+ /**
+ * @hpd_mutex: Protects the @hpd_cb and @hpd_data fields.
+ */
+ struct mutex hpd_mutex;
+ /**
+ * @hpd_cb: Hot plug detection callback, registered with
+ * drm_bridge_hpd_enable().
+ */
+ void (*hpd_cb)(void *data, enum drm_connector_status status);
+ /**
+ * @hpd_data: Private data passed to the Hot plug detection callback
+ * @hpd_cb.
+ */
+ void *hpd_data;
};
static inline struct drm_bridge *
@@ -1209,7 +1383,8 @@ drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
u32 output_fmt,
unsigned int *num_input_fmts);
-enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge);
+enum drm_connector_status
+drm_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector);
int drm_bridge_get_modes(struct drm_bridge *bridge,
struct drm_connector *connector);
const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge,
@@ -1274,6 +1449,8 @@ static inline struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm,
}
#endif
+void devm_drm_put_bridge(struct device *dev, struct drm_bridge *bridge);
+
void drm_bridge_debugfs_params(struct dentry *root);
void drm_bridge_debugfs_encoder_params(struct dentry *root, struct drm_encoder *encoder);
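To illustrate the prototype changes in this file: detect() now receives the connector, and the HDMI/DP audio hooks take the bridge as the first argument. A hedged sketch of a bridge ops table adapting to the new shapes (foo_* names and the HPD readout are illustrative only):

static enum drm_connector_status
foo_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
	/* foo_hpd_asserted() stands in for the bridge's HPD status readout */
	return foo_hpd_asserted(bridge) ? connector_status_connected
					: connector_status_disconnected;
}

static int foo_hdmi_audio_startup(struct drm_bridge *bridge,
				  struct drm_connector *connector)
{
	/* the bridge now comes first in all hdmi_audio_*/dp_audio_* hooks */
	return 0;
}

static const struct drm_bridge_funcs foo_bridge_funcs = {
	.detect			= foo_bridge_detect,
	.hdmi_audio_startup	= foo_hdmi_audio_startup,
	/*
	 * Bridges providing their own CEC hardware would additionally fill
	 * the new hdmi_cec_* hooks and set DRM_BRIDGE_OP_HDMI_CEC_ADAPTER.
	 */
};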
diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h
index ed81741036d7..6cb577f6dba6 100644
--- a/include/drm/drm_color_mgmt.h
+++ b/include/drm/drm_color_mgmt.h
@@ -118,4 +118,31 @@ enum drm_color_lut_tests {
};
int drm_color_lut_check(const struct drm_property_blob *lut, u32 tests);
+
+/*
+ * Gamma-LUT programming
+ */
+
+typedef void (*drm_crtc_set_lut_func)(struct drm_crtc *, unsigned int, u16, u16, u16);
+
+void drm_crtc_load_gamma_888(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_gamma);
+void drm_crtc_load_gamma_565_from_888(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_gamma);
+void drm_crtc_load_gamma_555_from_888(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_gamma);
+
+void drm_crtc_fill_gamma_888(struct drm_crtc *crtc, drm_crtc_set_lut_func set_gamma);
+void drm_crtc_fill_gamma_565(struct drm_crtc *crtc, drm_crtc_set_lut_func set_gamma);
+void drm_crtc_fill_gamma_555(struct drm_crtc *crtc, drm_crtc_set_lut_func set_gamma);
+
+/*
+ * Color-LUT programming
+ */
+
+void drm_crtc_load_palette_8(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_palette);
+
+void drm_crtc_fill_palette_8(struct drm_crtc *crtc, drm_crtc_set_lut_func set_palette);
+
#endif
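These helpers factor the per-entry LUT writes out of drivers: the driver supplies a drm_crtc_set_lut_func callback and lets the helper iterate the gamma ramp or fill a linear default. A hedged sketch, with the hardware access and foo_* names purely illustrative:

static void foo_set_gamma(struct drm_crtc *crtc, unsigned int index,
			  u16 r, u16 g, u16 b)
{
	/* write one hardware gamma-table entry; this hardware takes 8 bits */
	foo_write_lut_entry(crtc, index, r >> 8, g >> 8, b >> 8);
}

static void foo_crtc_atomic_flush(struct drm_crtc *crtc,
				  struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = crtc->state;

	if (crtc_state->gamma_lut)
		drm_crtc_load_gamma_888(crtc, crtc_state->gamma_lut->data,
					foo_set_gamma);
	else
		drm_crtc_fill_gamma_888(crtc, foo_set_gamma);
}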
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index f13d597370a3..8f34f4b8183d 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -800,6 +800,11 @@ struct drm_display_info {
struct drm_hdmi_info hdmi;
/**
+ * @hdr_sink_metadata: HDR Metadata Information read from sink
+ */
+ struct hdr_sink_metadata hdr_sink_metadata;
+
+ /**
* @non_desktop: Non desktop display (HMD).
*/
bool non_desktop;
@@ -843,7 +848,9 @@ struct drm_display_info {
int vics_len;
/**
- * @quirks: EDID based quirks. Internal to EDID parsing.
+ * @quirks: EDID based quirks. DRM core and drivers can query the
+ * @drm_edid_quirk quirks using drm_edid_has_quirk(), the rest of
+ * the quirks also tracked here are internal to EDID parsing.
*/
u32 quirks;
@@ -1191,6 +1198,29 @@ struct drm_connector_hdmi_audio_funcs {
bool enable, int direction);
};
+void drm_connector_cec_phys_addr_invalidate(struct drm_connector *connector);
+void drm_connector_cec_phys_addr_set(struct drm_connector *connector);
+
+/**
+ * struct drm_connector_cec_funcs - drm_hdmi_connector control functions
+ */
+struct drm_connector_cec_funcs {
+ /**
+ * @phys_addr_invalidate: mark CEC physical address as invalid
+ *
+ * The callback to mark CEC physical address as invalid, abstracting
+ * the operation.
+ */
+ void (*phys_addr_invalidate)(struct drm_connector *connector);
+
+ /**
+ * @phys_addr_set: set CEC physical address
+ *
+ * The callback to set CEC physical address, abstracting the operation.
+ */
+ void (*phys_addr_set)(struct drm_connector *connector, u16 addr);
+};
+
/**
* struct drm_connector_hdmi_funcs - drm_hdmi_connector control functions
*/
@@ -1833,6 +1863,26 @@ struct drm_connector_hdmi {
};
/**
+ * struct drm_connector_cec - DRM Connector CEC-related structure
+ */
+struct drm_connector_cec {
+ /**
+ * @mutex: protects all fields in this structure.
+ */
+ struct mutex mutex;
+
+ /**
+ * @funcs: CEC Control Functions
+ */
+ const struct drm_connector_cec_funcs *funcs;
+
+ /**
+ * @data: CEC implementation-specific data
+ */
+ void *data;
+};
+
+/**
* struct drm_connector - central DRM connector control structure
*
* Each connector may be connected to one or more CRTCs, or may be clonable by
@@ -2241,9 +2291,6 @@ struct drm_connector {
*/
struct llist_node free_node;
- /** @hdr_sink_metadata: HDR Metadata Information read from sink */
- struct hdr_sink_metadata hdr_sink_metadata;
-
/**
* @hdmi: HDMI-related variable and properties.
*/
@@ -2253,6 +2300,11 @@ struct drm_connector {
* @hdmi_audio: HDMI codec properties and non-DRM state.
*/
struct drm_connector_hdmi_audio hdmi_audio;
+
+ /**
+ * @cec: CEC-related data.
+ */
+ struct drm_connector_cec cec;
};
#define obj_to_connector(x) container_of(x, struct drm_connector, base)
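
As a sketch of how the new CEC hooks might be implemented, assuming a driver that owns a CEC adapter and stores its context in connector->cec.data (struct foo_cec and its registration are hypothetical; only the drm_connector_cec_funcs layout comes from the header above):

#include <drm/drm_connector.h>
#include <media/cec.h>

struct foo_cec {
	struct cec_adapter *adap;
};

static void foo_cec_phys_addr_set(struct drm_connector *connector, u16 addr)
{
	struct foo_cec *cec = connector->cec.data;

	/* Forward the physical address parsed from the EDID to the adapter. */
	cec_s_phys_addr(cec->adap, addr, false);
}

static void foo_cec_phys_addr_invalidate(struct drm_connector *connector)
{
	struct foo_cec *cec = connector->cec.data;

	cec_phys_addr_invalidate(cec->adap);
}

static const struct drm_connector_cec_funcs foo_cec_funcs = {
	.phys_addr_set = foo_cec_phys_addr_set,
	.phys_addr_invalidate = foo_cec_phys_addr_invalidate,
};

The drm_connector_cec_phys_addr_set()/_invalidate() helpers declared above are expected to end up in these callbacks.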
diff --git a/include/drm/drm_debugfs.h b/include/drm/drm_debugfs.h
index cf06cee4343f..ea8cba94208a 100644
--- a/include/drm/drm_debugfs.h
+++ b/include/drm/drm_debugfs.h
@@ -153,6 +153,9 @@ void drm_debugfs_add_files(struct drm_device *dev,
int drm_debugfs_gpuva_info(struct seq_file *m,
struct drm_gpuvm *gpuvm);
+
+void drm_debugfs_clients_add(struct drm_file *file);
+void drm_debugfs_clients_remove(struct drm_file *file);
#else
static inline void drm_debugfs_create_files(const struct drm_info_list *files,
int count, struct dentry *root,
@@ -181,6 +184,14 @@ static inline int drm_debugfs_gpuva_info(struct seq_file *m,
{
return 0;
}
+
+static inline void drm_debugfs_clients_add(struct drm_file *file)
+{
+}
+
+static inline void drm_debugfs_clients_remove(struct drm_file *file)
+{
+}
#endif
#endif /* _DRM_DEBUGFS_H_ */
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index e2f894f1b90a..a33aedd5e9ec 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -5,6 +5,7 @@
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/idr.h>
+#include <linux/sched.h>
#include <drm/drm_mode_config.h>
@@ -31,6 +32,16 @@ struct pci_controller;
#define DRM_WEDGE_RECOVERY_BUS_RESET BIT(2) /* unbind + reset bus device + bind */
/**
+ * struct drm_wedge_task_info - information about the guilty task of a wedged device
+ */
+struct drm_wedge_task_info {
+ /** @pid: pid of the task */
+ pid_t pid;
+ /** @comm: command name of the task */
+ char comm[TASK_COMM_LEN];
+};
+
+/**
* enum switch_power_state - power state of drm device
*/
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 63b51942d606..42fc085f986d 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -487,7 +487,8 @@ void drm_put_dev(struct drm_device *dev);
bool drm_dev_enter(struct drm_device *dev, int *idx);
void drm_dev_exit(int idx);
void drm_dev_unplug(struct drm_device *dev);
-int drm_dev_wedged_event(struct drm_device *dev, unsigned long method);
+int drm_dev_wedged_event(struct drm_device *dev, unsigned long method,
+ struct drm_wedge_task_info *info);
/**
* drm_dev_is_unplugged - is a DRM device unplugged
@@ -571,9 +572,24 @@ static inline bool drm_firmware_drivers_only(void)
}
#if defined(CONFIG_DEBUG_FS)
-void drm_debugfs_dev_init(struct drm_device *dev, struct dentry *root);
+void drm_debugfs_dev_init(struct drm_device *dev);
+void drm_debugfs_init_root(void);
+void drm_debugfs_remove_root(void);
+void drm_debugfs_bridge_params(void);
#else
-static inline void drm_debugfs_dev_init(struct drm_device *dev, struct dentry *root)
+static inline void drm_debugfs_dev_init(struct drm_device *dev)
+{
+}
+
+static inline void drm_debugfs_init_root(void)
+{
+}
+
+static inline void drm_debugfs_remove_root(void)
+{
+}
+
+static inline void drm_debugfs_bridge_params(void)
{
}
#endif
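
A hedged sketch of how a driver's reset path might use the extended drm_dev_wedged_event(); the guilty-task lookup is driver-specific and foo_report_wedged() is hypothetical, but drm_wedge_task_info and the recovery flag come from the headers above:

#include <linux/sched.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>

static void foo_report_wedged(struct drm_device *dev, struct task_struct *task)
{
	struct drm_wedge_task_info info = {};

	if (task) {
		info.pid = task->pid;
		get_task_comm(info.comm, task);
	}

	/* Request the heaviest recovery: unbind + reset bus device + bind. */
	drm_dev_wedged_event(dev, DRM_WEDGE_RECOVERY_BUS_RESET,
			     task ? &info : NULL);
}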
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index b38409670868..3d1aecfec9b2 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -109,6 +109,13 @@ struct detailed_data_string {
#define DRM_EDID_CVT_FLAGS_STANDARD_BLANKING (1 << 3)
#define DRM_EDID_CVT_FLAGS_REDUCED_BLANKING (1 << 4)
+enum drm_edid_quirk {
+ /* Do a dummy read before DPCD accesses, to prevent corruption. */
+ DRM_EDID_QUIRK_DP_DPCD_PROBE,
+
+ DRM_EDID_QUIRK_NUM,
+};
+
struct detailed_data_monitor_range {
u8 min_vfreq;
u8 max_vfreq;
@@ -476,5 +483,6 @@ void drm_edid_print_product_id(struct drm_printer *p,
u32 drm_edid_get_panel_id(const struct drm_edid *drm_edid);
bool drm_edid_match(const struct drm_edid *drm_edid,
const struct drm_edid_ident *ident);
+bool drm_edid_has_quirk(struct drm_connector *connector, enum drm_edid_quirk quirk);
#endif /* __DRM_EDID_H__ */
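
A minimal usage sketch: DP code could gate the workaround on the new quirk (the helper name is hypothetical):

#include <drm/drm_connector.h>
#include <drm/drm_edid.h>

/* Returns true when the sink needs a dummy read before DPCD accesses. */
static bool foo_needs_dpcd_probe(struct drm_connector *connector)
{
	return drm_edid_has_quirk(connector, DRM_EDID_QUIRK_DP_DPCD_PROBE);
}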
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index d344d41e6cfe..115763799625 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -403,6 +403,13 @@ struct drm_file {
* @client_name_lock: Protects @client_name.
*/
struct mutex client_name_lock;
+
+ /**
+ * @debugfs_client:
+ *
+ * debugfs directory for each client under a drm node.
+ */
+ struct dentry *debugfs_client;
};
/**
diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h
index d8539174ca11..562bc383ece4 100644
--- a/include/drm/drm_format_helper.h
+++ b/include/drm/drm_format_helper.h
@@ -82,8 +82,10 @@ void drm_fb_xrgb8888_to_rgb332(struct iosys_map *dst, const unsigned int *dst_pi
const struct drm_rect *clip, struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip, struct drm_format_conv_state *state,
- bool swab);
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_rgb565be(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_xrgb1555(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip, struct drm_format_conv_state *state);
@@ -102,6 +104,15 @@ void drm_fb_xrgb8888_to_bgr888(struct iosys_map *dst, const unsigned int *dst_pi
void drm_fb_xrgb8888_to_argb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_bgrx8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip,
@@ -125,8 +136,4 @@ void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitc
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip, struct drm_format_conv_state *state);
-size_t drm_fb_build_fourcc_list(struct drm_device *dev,
- const u32 *native_fourccs, size_t native_nfourccs,
- u32 *fourccs_out, size_t nfourccs_out);
-
#endif /* __LINUX_DRM_FORMAT_HELPER_H */
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index c3f4405d6662..471784426857 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -54,7 +54,6 @@
#endif
struct drm_device;
-struct drm_mode_fb_cmd2;
/**
* struct drm_format_info - information about a DRM format
@@ -309,7 +308,7 @@ const struct drm_format_info *__drm_format_info(u32 format);
const struct drm_format_info *drm_format_info(u32 format);
const struct drm_format_info *
drm_get_format_info(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode_cmd);
+ u32 pixel_format, u64 modifier);
uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
uint32_t drm_driver_legacy_fb_format(struct drm_device *dev,
uint32_t bpp, uint32_t depth);
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index a3133a08267c..d3a7b43e2c63 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -126,7 +126,8 @@ struct drm_gem_object_funcs {
/**
* @pin:
*
- * Pin backing buffer in memory. Used by the drm_gem_map_attach() helper.
+ * Pin backing buffer in memory, such that dma-buf importers can
+ * access it. Used by the drm_gem_map_attach() helper.
*
* This callback is optional.
*/
@@ -559,10 +560,12 @@ void drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock);
void drm_gem_lru_remove(struct drm_gem_object *obj);
void drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj);
void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj);
-unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru,
- unsigned int nr_to_scan,
- unsigned long *remaining,
- bool (*shrink)(struct drm_gem_object *obj));
+unsigned long
+drm_gem_lru_scan(struct drm_gem_lru *lru,
+ unsigned int nr_to_scan,
+ unsigned long *remaining,
+ bool (*shrink)(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket),
+ struct ww_acquire_ctx *ticket);
int drm_gem_evict_locked(struct drm_gem_object *obj);
diff --git a/include/drm/drm_gem_framebuffer_helper.h b/include/drm/drm_gem_framebuffer_helper.h
index d302521f3dd4..24f1fd40d553 100644
--- a/include/drm/drm_gem_framebuffer_helper.h
+++ b/include/drm/drm_gem_framebuffer_helper.h
@@ -8,6 +8,7 @@ struct drm_afbc_framebuffer;
struct drm_device;
struct drm_fb_helper_surface_size;
struct drm_file;
+struct drm_format_info;
struct drm_framebuffer;
struct drm_framebuffer_funcs;
struct drm_gem_object;
@@ -24,17 +25,21 @@ int drm_gem_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file,
int drm_gem_fb_init_with_funcs(struct drm_device *dev,
struct drm_framebuffer *fb,
struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
const struct drm_framebuffer_funcs *funcs);
struct drm_framebuffer *
drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
const struct drm_framebuffer_funcs *funcs);
struct drm_framebuffer *
drm_gem_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer *
drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map,
@@ -47,6 +52,7 @@ void drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_directi
(((modifier) & AFBC_VENDOR_AND_TYPE_MASK) == DRM_FORMAT_MOD_ARM_AFBC(0))
int drm_gem_fb_afbc_init(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_afbc_framebuffer *afbc_fb);
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index b4f993da3cae..92f5db84b9c2 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -287,15 +287,18 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sgt);
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
+struct drm_gem_object *drm_gem_shmem_prime_import_no_map(struct drm_device *dev,
+ struct dma_buf *buf);
/**
* DRM_GEM_SHMEM_DRIVER_OPS - Default shmem GEM operations
*
- * This macro provides a shortcut for setting the shmem GEM operations in
- * the &drm_driver structure.
+ * This macro provides a shortcut for setting the shmem GEM operations
+ * in the &drm_driver structure. Drivers that do not require an s/g table
+ * for imported buffers should use this.
*/
#define DRM_GEM_SHMEM_DRIVER_OPS \
- .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \
- .dumb_create = drm_gem_shmem_dumb_create
+ .gem_prime_import = drm_gem_shmem_prime_import_no_map, \
+ .dumb_create = drm_gem_shmem_dumb_create
#endif /* __DRM_GEM_SHMEM_HELPER_H__ */
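
For illustration, a driver that never needs an s/g table for imported buffers simply places the macro in its &drm_driver (the foo driver below is hypothetical):

#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>

static const struct drm_driver foo_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	/* Expands to .gem_prime_import and .dumb_create for shmem objects. */
	DRM_GEM_SHMEM_DRIVER_OPS,
	.name	= "foo",
	.desc	= "hypothetical shmem-backed driver",
	.major	= 1,
	.minor	= 0,
};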
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
index 00830b49a3ff..2dd42bed679d 100644
--- a/include/drm/drm_gem_vram_helper.h
+++ b/include/drm/drm_gem_vram_helper.h
@@ -94,8 +94,6 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
unsigned long pg_align);
void drm_gem_vram_put(struct drm_gem_vram_object *gbo);
s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo);
-int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag);
-int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo);
int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct iosys_map *map);
void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo,
struct iosys_map *map);
diff --git a/include/drm/drm_gpusvm.h b/include/drm/drm_gpusvm.h
index eaf704d3d05e..4aedc5423aff 100644
--- a/include/drm/drm_gpusvm.h
+++ b/include/drm/drm_gpusvm.h
@@ -16,92 +16,10 @@ struct drm_gpusvm;
struct drm_gpusvm_notifier;
struct drm_gpusvm_ops;
struct drm_gpusvm_range;
-struct drm_gpusvm_devmem;
struct drm_pagemap;
struct drm_pagemap_device_addr;
/**
- * struct drm_gpusvm_devmem_ops - Operations structure for GPU SVM device memory
- *
- * This structure defines the operations for GPU Shared Virtual Memory (SVM)
- * device memory. These operations are provided by the GPU driver to manage device memory
- * allocations and perform operations such as migration between device memory and system
- * RAM.
- */
-struct drm_gpusvm_devmem_ops {
- /**
- * @devmem_release: Release device memory allocation (optional)
- * @devmem_allocation: device memory allocation
- *
- * Release device memory allocation and drop a reference to device
- * memory allocation.
- */
- void (*devmem_release)(struct drm_gpusvm_devmem *devmem_allocation);
-
- /**
- * @populate_devmem_pfn: Populate device memory PFN (required for migration)
- * @devmem_allocation: device memory allocation
- * @npages: Number of pages to populate
- * @pfn: Array of page frame numbers to populate
- *
- * Populate device memory page frame numbers (PFN).
- *
- * Return: 0 on success, a negative error code on failure.
- */
- int (*populate_devmem_pfn)(struct drm_gpusvm_devmem *devmem_allocation,
- unsigned long npages, unsigned long *pfn);
-
- /**
- * @copy_to_devmem: Copy to device memory (required for migration)
- * @pages: Pointer to array of device memory pages (destination)
- * @dma_addr: Pointer to array of DMA addresses (source)
- * @npages: Number of pages to copy
- *
- * Copy pages to device memory.
- *
- * Return: 0 on success, a negative error code on failure.
- */
- int (*copy_to_devmem)(struct page **pages,
- dma_addr_t *dma_addr,
- unsigned long npages);
-
- /**
- * @copy_to_ram: Copy to system RAM (required for migration)
- * @pages: Pointer to array of device memory pages (source)
- * @dma_addr: Pointer to array of DMA addresses (destination)
- * @npages: Number of pages to copy
- *
- * Copy pages to system RAM.
- *
- * Return: 0 on success, a negative error code on failure.
- */
- int (*copy_to_ram)(struct page **pages,
- dma_addr_t *dma_addr,
- unsigned long npages);
-};
-
-/**
- * struct drm_gpusvm_devmem - Structure representing a GPU SVM device memory allocation
- *
- * @dev: Pointer to the device structure which device memory allocation belongs to
- * @mm: Pointer to the mm_struct for the address space
- * @detached: device memory allocations is detached from device pages
- * @ops: Pointer to the operations structure for GPU SVM device memory
- * @dpagemap: The struct drm_pagemap of the pages this allocation belongs to.
- * @size: Size of device memory allocation
- * @timeslice_expiration: Timeslice expiration in jiffies
- */
-struct drm_gpusvm_devmem {
- struct device *dev;
- struct mm_struct *mm;
- struct completion detached;
- const struct drm_gpusvm_devmem_ops *ops;
- struct drm_pagemap *dpagemap;
- size_t size;
- u64 timeslice_expiration;
-};
-
-/**
* struct drm_gpusvm_ops - Operations structure for GPU SVM
*
* This structure defines the operations for GPU Shared Virtual Memory (SVM).
@@ -327,6 +245,11 @@ void drm_gpusvm_fini(struct drm_gpusvm *gpusvm);
void drm_gpusvm_free(struct drm_gpusvm *gpusvm);
+unsigned long
+drm_gpusvm_find_vma_start(struct drm_gpusvm *gpusvm,
+ unsigned long start,
+ unsigned long end);
+
struct drm_gpusvm_range *
drm_gpusvm_range_find_or_insert(struct drm_gpusvm *gpusvm,
unsigned long fault_addr,
@@ -356,15 +279,6 @@ void drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
struct drm_gpusvm_range *range,
const struct drm_gpusvm_ctx *ctx);
-int drm_gpusvm_migrate_to_devmem(struct drm_gpusvm *gpusvm,
- struct drm_gpusvm_range *range,
- struct drm_gpusvm_devmem *devmem_allocation,
- const struct drm_gpusvm_ctx *ctx);
-
-int drm_gpusvm_evict_to_ram(struct drm_gpusvm_devmem *devmem_allocation);
-
-const struct dev_pagemap_ops *drm_gpusvm_pagemap_ops_get(void);
-
bool drm_gpusvm_has_mapping(struct drm_gpusvm *gpusvm, unsigned long start,
unsigned long end);
@@ -375,11 +289,6 @@ drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range,
const struct mmu_notifier_range *mmu_range);
-void drm_gpusvm_devmem_init(struct drm_gpusvm_devmem *devmem_allocation,
- struct device *dev, struct mm_struct *mm,
- const struct drm_gpusvm_devmem_ops *ops,
- struct drm_pagemap *dpagemap, size_t size);
-
#ifdef CONFIG_LOCKDEP
/**
* drm_gpusvm_driver_set_lock() - Set the lock protecting accesses to GPU SVM
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
index 2a9629377633..274532facfd6 100644
--- a/include/drm/drm_gpuvm.h
+++ b/include/drm/drm_gpuvm.h
@@ -1211,6 +1211,14 @@ int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
u64 addr, u64 range);
+int drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec, unsigned int num_fences,
+ u64 req_addr, u64 req_range,
+ struct drm_gem_object *obj, u64 offset);
+
+int drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
+ u64 req_addr, u64 req_range);
+
void drm_gpuva_map(struct drm_gpuvm *gpuvm,
struct drm_gpuva *va,
struct drm_gpuva_op_map *op);
diff --git a/include/drm/drm_managed.h b/include/drm/drm_managed.h
index 53017cc609ac..72bfac002c06 100644
--- a/include/drm/drm_managed.h
+++ b/include/drm/drm_managed.h
@@ -129,14 +129,25 @@ void __drmm_mutex_release(struct drm_device *dev, void *res);
void __drmm_workqueue_release(struct drm_device *device, void *wq);
+/**
+ * drmm_alloc_ordered_workqueue - &drm_device managed alloc_ordered_workqueue()
+ * @dev: DRM device
+ * @fmt: printf format for the name of the workqueue
+ * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
+ * @args: args for @fmt
+ *
+ * This is a &drm_device-managed version of alloc_ordered_workqueue(). The
+ * allocated workqueue is automatically destroyed on the final drm_dev_put().
+ *
+ * Returns: workqueue on success, negative ERR_PTR otherwise.
+ */
#define drmm_alloc_ordered_workqueue(dev, fmt, flags, args...) \
({ \
struct workqueue_struct *wq = alloc_ordered_workqueue(fmt, flags, ##args); \
wq ? ({ \
int ret = drmm_add_action_or_reset(dev, __drmm_workqueue_release, wq); \
ret ? ERR_PTR(ret) : wq; \
- }) : \
- wq; \
+ }) : ERR_PTR(-ENOMEM); \
})
#endif
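
Because the macro now returns ERR_PTR(-ENOMEM) instead of NULL on allocation failure, callers should test with IS_ERR(); a short sketch (the foo_device embedding is hypothetical):

#include <linux/err.h>
#include <drm/drm_managed.h>

static int foo_init_flush_wq(struct foo_device *fdev)
{
	fdev->flush_wq = drmm_alloc_ordered_workqueue(&fdev->drm, "foo-flush",
						      WQ_MEM_RECLAIM);
	if (IS_ERR(fdev->flush_wq))
		return PTR_ERR(fdev->flush_wq);

	return 0;
}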
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 6d2c08e81101..57a869a6f6e8 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -130,8 +130,6 @@ struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node);
#define MIPI_DSI_MODE_VIDEO_NO_HBP BIT(6)
/* disable hsync-active area */
#define MIPI_DSI_MODE_VIDEO_NO_HSA BIT(7)
-/* flush display FIFO on vsync pulse */
-#define MIPI_DSI_MODE_VSYNC_FLUSH BIT(8)
/* disable EoT packets in HS mode */
#define MIPI_DSI_MODE_NO_EOT_PACKET BIT(9)
/* device supports non-continuous clock behavior (DSI spec 5.6.1) */
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 9e524b51a001..2e848b816218 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -82,6 +82,7 @@ struct drm_mode_config_funcs {
*/
struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
/**
@@ -95,7 +96,7 @@ struct drm_mode_config_funcs {
* The format information specific to the given fb metadata, or
* NULL if none is found.
*/
- const struct drm_format_info *(*get_format_info)(const struct drm_mode_fb_cmd2 *mode_cmd);
+ const struct drm_format_info *(*get_format_info)(u32 pixel_format, u64 modifier);
/**
* @mode_valid:
diff --git a/include/drm/drm_modeset_helper.h b/include/drm/drm_modeset_helper.h
index 995fd981cab0..7e3d4c5a7f66 100644
--- a/include/drm/drm_modeset_helper.h
+++ b/include/drm/drm_modeset_helper.h
@@ -26,6 +26,7 @@
struct drm_crtc;
struct drm_crtc_funcs;
struct drm_device;
+struct drm_format_info;
struct drm_framebuffer;
struct drm_mode_fb_cmd2;
@@ -33,6 +34,7 @@ void drm_helper_move_panel_connectors_to_head(struct drm_device *);
void drm_helper_mode_fill_fb_struct(struct drm_device *dev,
struct drm_framebuffer *fb,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
index 202c157ff4d7..e5f20a1235be 100644
--- a/include/drm/drm_pagemap.h
+++ b/include/drm/drm_pagemap.h
@@ -7,6 +7,7 @@
#include <linux/types.h>
struct drm_pagemap;
+struct drm_pagemap_zdd;
struct device;
/**
@@ -91,6 +92,35 @@ struct drm_pagemap_ops {
struct device *dev,
struct drm_pagemap_device_addr addr);
+ /**
+ * @populate_mm: Populate part of the mm with @dpagemap memory,
+ * migrating existing data.
+ * @dpagemap: The struct drm_pagemap managing the memory.
+ * @start: The virtual start address in @mm
+ * @end: The virtual end address in @mm
+ * @mm: Pointer to a live mm. The caller must have an mmget()
+ * reference.
+ * @timeslice_ms: The time requested for the migrated memory to remain
+ * resident in @mm before it may be migrated back.
+ *
+ * The caller will have the mm lock at least in read mode.
+ * Note that there is no guarantee that the memory is resident
+ * after the function returns; it's best effort only.
+ * When the mm is not using the memory anymore,
+ * it will be released. The struct drm_pagemap might have a
+ * mechanism in place to reclaim the memory and the data will
+ * then be migrated, typically to system memory.
+ * The implementation should hold sufficient runtime power-
+ * references while pages are used in an address space and
+ * should ideally guard against hardware device unbind in
+ * a way such that device pages are migrated back to system
+ * memory, followed by device page removal. The implementation should
+ * return -ENODEV after device removal.
+ *
+ * Return: 0 if successful. Negative error code on error.
+ */
+ int (*populate_mm)(struct drm_pagemap *dpagemap,
+ unsigned long start, unsigned long end,
+ struct mm_struct *mm,
+ unsigned long timeslice_ms);
};
/**
@@ -104,4 +134,109 @@ struct drm_pagemap {
struct device *dev;
};
+struct drm_pagemap_devmem;
+
+/**
+ * struct drm_pagemap_devmem_ops - Operations structure for GPU SVM device memory
+ *
+ * This structure defines the operations for GPU Shared Virtual Memory (SVM)
+ * device memory. These operations are provided by the GPU driver to manage device memory
+ * allocations and perform operations such as migration between device memory and system
+ * RAM.
+ */
+struct drm_pagemap_devmem_ops {
+ /**
+ * @devmem_release: Release device memory allocation (optional)
+ * @devmem_allocation: device memory allocation
+ *
+ * Release device memory allocation and drop a reference to device
+ * memory allocation.
+ */
+ void (*devmem_release)(struct drm_pagemap_devmem *devmem_allocation);
+
+ /**
+ * @populate_devmem_pfn: Populate device memory PFN (required for migration)
+ * @devmem_allocation: device memory allocation
+ * @npages: Number of pages to populate
+ * @pfn: Array of page frame numbers to populate
+ *
+ * Populate device memory page frame numbers (PFN).
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+ int (*populate_devmem_pfn)(struct drm_pagemap_devmem *devmem_allocation,
+ unsigned long npages, unsigned long *pfn);
+
+ /**
+ * @copy_to_devmem: Copy to device memory (required for migration)
+ * @pages: Pointer to array of device memory pages (destination)
+ * @dma_addr: Pointer to array of DMA addresses (source)
+ * @npages: Number of pages to copy
+ *
+ * Copy pages to device memory.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+ int (*copy_to_devmem)(struct page **pages,
+ dma_addr_t *dma_addr,
+ unsigned long npages);
+
+ /**
+ * @copy_to_ram: Copy to system RAM (required for migration)
+ * @pages: Pointer to array of device memory pages (source)
+ * @dma_addr: Pointer to array of DMA addresses (destination)
+ * @npages: Number of pages to copy
+ *
+ * Copy pages to system RAM.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+ int (*copy_to_ram)(struct page **pages,
+ dma_addr_t *dma_addr,
+ unsigned long npages);
+};
+
+/**
+ * struct drm_pagemap_devmem - Structure representing a GPU SVM device memory allocation
+ *
+ * @dev: Pointer to the device structure to which the device memory allocation belongs
+ * @mm: Pointer to the mm_struct for the address space
+ * @detached: completed when the device memory allocation is detached from device pages
+ * @ops: Pointer to the operations structure for GPU SVM device memory
+ * @dpagemap: The struct drm_pagemap of the pages this allocation belongs to.
+ * @size: Size of device memory allocation
+ * @timeslice_expiration: Timeslice expiration in jiffies
+ */
+struct drm_pagemap_devmem {
+ struct device *dev;
+ struct mm_struct *mm;
+ struct completion detached;
+ const struct drm_pagemap_devmem_ops *ops;
+ struct drm_pagemap *dpagemap;
+ size_t size;
+ u64 timeslice_expiration;
+};
+
+int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end,
+ unsigned long timeslice_ms,
+ void *pgmap_owner);
+
+int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation);
+
+const struct dev_pagemap_ops *drm_pagemap_pagemap_ops_get(void);
+
+struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page);
+
+void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
+ struct device *dev, struct mm_struct *mm,
+ const struct drm_pagemap_devmem_ops *ops,
+ struct drm_pagemap *dpagemap, size_t size);
+
+int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
+ unsigned long start, unsigned long end,
+ struct mm_struct *mm,
+ unsigned long timeslice_ms);
+
#endif
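
One plausible shape for a &drm_pagemap_ops.populate_mm implementation, assuming a driver-specific allocator that embeds a struct drm_pagemap_devmem and calls drm_pagemap_devmem_init() internally (struct foo_devmem, foo_alloc_devmem() and foo_pgmap_owner() are hypothetical):

#include <linux/err.h>
#include <drm/drm_pagemap.h>

struct foo_devmem {
	struct drm_pagemap_devmem base;
	/* driver-specific backing storage ... */
};

static int foo_pagemap_populate_mm(struct drm_pagemap *dpagemap,
				   unsigned long start, unsigned long end,
				   struct mm_struct *mm,
				   unsigned long timeslice_ms)
{
	struct foo_devmem *fdm;

	/* Driver-specific device memory allocation for [start, end). */
	fdm = foo_alloc_devmem(dpagemap, end - start);
	if (IS_ERR(fdm))
		return PTR_ERR(fdm);

	/* Migrate the range into the freshly allocated device memory. */
	return drm_pagemap_migrate_to_devmem(&fdm->base, mm, start, end,
					     timeslice_ms,
					     foo_pgmap_owner(dpagemap));
}

static const struct drm_pagemap_ops foo_pagemap_ops = {
	.populate_mm = foo_pagemap_populate_mm,
};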
diff --git a/include/drm/drm_panic.h b/include/drm/drm_panic.h
index 310c88c4d336..ac0e46b73436 100644
--- a/include/drm/drm_panic.h
+++ b/include/drm/drm_panic.h
@@ -72,6 +72,12 @@ struct drm_scanout_buffer {
void (*set_pixel)(struct drm_scanout_buffer *sb, unsigned int x,
unsigned int y, u32 color);
+ /**
+ * @private: private pointer that can be used by the set_pixel()
+ * callback
+ */
+ void *private;
+
};
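
A short sketch of the new @private usage from a get_scanout_buffer() implementation (struct foo_plane, foo_panic_ctx and foo_write_pixel() are hypothetical):

#include <drm/drm_panic.h>

static void foo_panic_set_pixel(struct drm_scanout_buffer *sb,
				unsigned int x, unsigned int y, u32 color)
{
	struct foo_panic_ctx *ctx = sb->private;

	foo_write_pixel(ctx, x, y, color);
}

static int foo_get_scanout_buffer(struct drm_plane *plane,
				  struct drm_scanout_buffer *sb)
{
	struct foo_plane *fp = to_foo_plane(plane);

	/* ... fill in sb->format, sb->width, sb->height and sb->pitch ... */
	sb->set_pixel = foo_panic_set_pixel;
	sb->private = fp->panic_ctx;

	return 0;
}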
#ifdef CONFIG_DRM_PANIC
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index fa085c44d4ca..f50f862f0d8b 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -100,6 +100,9 @@ struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt);
/* helper functions for importing */
+bool drm_gem_is_prime_exported_dma_buf(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
struct dma_buf *dma_buf,
struct device *attach_dev);
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 1a7e377d4cbb..323a505e6e6a 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -192,7 +192,7 @@ struct drm_sched_entity {
* @last_scheduled:
*
* Points to the finished fence of the last scheduled job. Only written
- * by the scheduler thread, can be accessed locklessly from
+ * by drm_sched_entity_pop_job(). Can be accessed locklessly from
* drm_sched_job_arm() if the queue is empty.
*/
struct dma_fence __rcu *last_scheduled;
@@ -305,6 +305,13 @@ struct drm_sched_fence {
* @owner: job owner for debugging
*/
void *owner;
+
+ /**
+ * @drm_client_id:
+ *
+ * The client_id of the drm_file which owns the job.
+ */
+ uint64_t drm_client_id;
};
struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
@@ -319,7 +326,6 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
* @finish_cb: the callback for the finished fence.
* @credits: the number of credits this job contributes to the scheduler
* @work: Helper to reschedule job kill to different context.
- * @id: a unique id assigned to each job scheduled on the scheduler.
* @karma: increment on every hang caused by this job. If this exceeds the hang
* limit of the scheduler then the job is marked guilty and will not
* be scheduled further.
@@ -332,8 +338,6 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
* to schedule the job.
*/
struct drm_sched_job {
- u64 id;
-
/**
* @submit_ts:
*
@@ -387,13 +391,16 @@ struct drm_sched_job {
* enum drm_gpu_sched_stat - the scheduler's status
*
* @DRM_GPU_SCHED_STAT_NONE: Reserved. Do not use.
- * @DRM_GPU_SCHED_STAT_NOMINAL: Operation succeeded.
+ * @DRM_GPU_SCHED_STAT_RESET: The GPU hung and was successfully reset.
* @DRM_GPU_SCHED_STAT_ENODEV: Error: Device is not available anymore.
+ * @DRM_GPU_SCHED_STAT_NO_HANG: Contrary to the scheduler's assumption, the GPU
+ * did not hang and is still running.
*/
enum drm_gpu_sched_stat {
DRM_GPU_SCHED_STAT_NONE,
- DRM_GPU_SCHED_STAT_NOMINAL,
+ DRM_GPU_SCHED_STAT_RESET,
DRM_GPU_SCHED_STAT_ENODEV,
+ DRM_GPU_SCHED_STAT_NO_HANG,
};
/**
@@ -508,6 +515,24 @@ struct drm_sched_backend_ops {
* and it's time to clean it up.
*/
void (*free_job)(struct drm_sched_job *sched_job);
+
+ /**
+ * @cancel_job: Used by the scheduler to guarantee remaining jobs' fences
+ * get signaled in drm_sched_fini().
+ *
+ * Used by the scheduler to cancel all jobs that have not been executed
+ * with &struct drm_sched_backend_ops.run_job by the time
+ * drm_sched_fini() gets invoked.
+ *
+ * Drivers need to signal the passed job's hardware fence with an
+ * appropriate error code (e.g., -ECANCELED) in this callback. They
+ * must not free the job.
+ *
+ * The scheduler will only call this callback once it has stopped calling
+ * all other callbacks forever, with the exception of &struct
+ * drm_sched_backend_ops.free_job.
+ */
+ void (*cancel_job)(struct drm_sched_job *sched_job);
};
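
A minimal cancel_job sketch matching the contract above — signal the hardware fence with an error, never free the job (struct foo_job and to_foo_job() are hypothetical):

#include <linux/dma-fence.h>
#include <drm/gpu_scheduler.h>

static void foo_cancel_job(struct drm_sched_job *sched_job)
{
	struct foo_job *job = to_foo_job(sched_job);

	/*
	 * The job never reached the hardware; fail its fence so waiters are
	 * released, and leave freeing to &drm_sched_backend_ops.free_job.
	 */
	dma_fence_set_error(job->hw_fence, -ECANCELED);
	dma_fence_signal(job->hw_fence);
}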
/**
@@ -629,7 +654,8 @@ drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
- u32 credits, void *owner);
+ u32 credits, void *owner,
+ u64 drm_client_id);
void drm_sched_job_arm(struct drm_sched_job *job);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
int drm_sched_job_add_dependency(struct drm_sched_job *job,
diff --git a/include/drm/intel/pciids.h b/include/drm/intel/pciids.h
index a7ce9523c50d..76f8d26f9cc9 100644
--- a/include/drm/intel/pciids.h
+++ b/include/drm/intel/pciids.h
@@ -846,14 +846,18 @@
/* BMG */
#define INTEL_BMG_IDS(MACRO__, ...) \
MACRO__(0xE202, ## __VA_ARGS__), \
+ MACRO__(0xE209, ## __VA_ARGS__), \
MACRO__(0xE20B, ## __VA_ARGS__), \
MACRO__(0xE20C, ## __VA_ARGS__), \
MACRO__(0xE20D, ## __VA_ARGS__), \
MACRO__(0xE210, ## __VA_ARGS__), \
MACRO__(0xE211, ## __VA_ARGS__), \
MACRO__(0xE212, ## __VA_ARGS__), \
- MACRO__(0xE215, ## __VA_ARGS__), \
- MACRO__(0xE216, ## __VA_ARGS__)
+ MACRO__(0xE216, ## __VA_ARGS__), \
+ MACRO__(0xE220, ## __VA_ARGS__), \
+ MACRO__(0xE221, ## __VA_ARGS__), \
+ MACRO__(0xE222, ## __VA_ARGS__), \
+ MACRO__(0xE223, ## __VA_ARGS__)
/* PTL */
#define INTEL_PTL_IDS(MACRO__, ...) \
@@ -868,6 +872,8 @@
MACRO__(0xB08F, ## __VA_ARGS__), \
MACRO__(0xB090, ## __VA_ARGS__), \
MACRO__(0xB0A0, ## __VA_ARGS__), \
- MACRO__(0xB0B0, ## __VA_ARGS__)
+ MACRO__(0xB0B0, ## __VA_ARGS__), \
+ MACRO__(0xFD80, ## __VA_ARGS__), \
+ MACRO__(0xFD81, ## __VA_ARGS__)
#endif /* __PCIIDS_H__ */
diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
index cf027558b6db..479b7ed075c0 100644
--- a/include/drm/ttm/ttm_bo.h
+++ b/include/drm/ttm/ttm_bo.h
@@ -207,11 +207,9 @@ struct ttm_lru_walk_ops {
};
/**
- * struct ttm_lru_walk - Structure describing a LRU walk.
+ * struct ttm_lru_walk_arg - Common part for the variants of BO LRU walk.
*/
-struct ttm_lru_walk {
- /** @ops: Pointer to the ops structure. */
- const struct ttm_lru_walk_ops *ops;
+struct ttm_lru_walk_arg {
/** @ctx: Pointer to the struct ttm_operation_ctx. */
struct ttm_operation_ctx *ctx;
/** @ticket: The struct ww_acquire_ctx if any. */
@@ -220,6 +218,16 @@ struct ttm_lru_walk {
bool trylock_only;
};
+/**
+ * struct ttm_lru_walk - Structure describing a LRU walk.
+ */
+struct ttm_lru_walk {
+ /** @ops: Pointer to the ops structure. */
+ const struct ttm_lru_walk_ops *ops;
+ /** @arg: Common bo LRU walk arguments. */
+ struct ttm_lru_walk_arg arg;
+};
+
s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
struct ttm_resource_manager *man, s64 target);
@@ -245,34 +253,6 @@ bool ttm_bo_shrink_suitable(struct ttm_buffer_object *bo, struct ttm_operation_c
bool ttm_bo_shrink_avoid_wait(void);
/**
- * ttm_bo_get - reference a struct ttm_buffer_object
- *
- * @bo: The buffer object.
- */
-static inline void ttm_bo_get(struct ttm_buffer_object *bo)
-{
- kref_get(&bo->kref);
-}
-
-/**
- * ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless
- * its refcount has already reached zero.
- * @bo: The buffer object.
- *
- * Used to reference a TTM buffer object in lookups where the object is removed
- * from the lookup structure during the destructor and for RCU lookups.
- *
- * Returns: @bo if the referencing was successful, NULL otherwise.
- */
-static inline __must_check struct ttm_buffer_object *
-ttm_bo_get_unless_zero(struct ttm_buffer_object *bo)
-{
- if (!kref_get_unless_zero(&bo->kref))
- return NULL;
- return bo;
-}
-
-/**
* ttm_bo_reserve:
*
* @bo: A pointer to a struct ttm_buffer_object.
@@ -429,6 +409,7 @@ int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
unsigned long num_pages, struct ttm_bo_kmap_obj *map);
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
+void *ttm_bo_kmap_try_from_panic(struct ttm_buffer_object *bo, unsigned long page);
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map);
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map);
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
@@ -495,11 +476,6 @@ struct ttm_bo_lru_cursor {
/** @res_curs: Embedded struct ttm_resource_cursor. */
struct ttm_resource_cursor res_curs;
/**
- * @ctx: The struct ttm_operation_ctx used while looping.
- * governs the locking mode.
- */
- struct ttm_operation_ctx *ctx;
- /**
* @bo: Buffer object pointer if a buffer object is refcounted,
* NULL otherwise.
*/
@@ -509,6 +485,8 @@ struct ttm_bo_lru_cursor {
* unlock before the next iteration or after loop exit.
*/
bool needs_unlock;
+ /** @arg: Pointer to common BO LRU walk arguments. */
+ struct ttm_lru_walk_arg *arg;
};
void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs);
@@ -516,7 +494,7 @@ void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs);
struct ttm_bo_lru_cursor *
ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs,
struct ttm_resource_manager *man,
- struct ttm_operation_ctx *ctx);
+ struct ttm_lru_walk_arg *arg);
struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs);
@@ -527,9 +505,9 @@ struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs)
*/
DEFINE_CLASS(ttm_bo_lru_cursor, struct ttm_bo_lru_cursor *,
if (_T) {ttm_bo_lru_cursor_fini(_T); },
- ttm_bo_lru_cursor_init(curs, man, ctx),
+ ttm_bo_lru_cursor_init(curs, man, arg),
struct ttm_bo_lru_cursor *curs, struct ttm_resource_manager *man,
- struct ttm_operation_ctx *ctx);
+ struct ttm_lru_walk_arg *arg);
static inline void *
class_ttm_bo_lru_cursor_lock_ptr(class_ttm_bo_lru_cursor_t *_T)
{ return *_T; }
@@ -540,7 +518,7 @@ class_ttm_bo_lru_cursor_lock_ptr(class_ttm_bo_lru_cursor_t *_T)
* resources on LRU lists.
* @_cursor: struct ttm_bo_lru_cursor to use for the iteration.
* @_man: The resource manager whose LRU lists to iterate over.
- * @_ctx: The struct ttm_operation_context to govern the @_bo locking.
+ * @_arg: The struct ttm_lru_walk_arg to govern the LRU walk.
* @_bo: The struct ttm_buffer_object pointer pointing to the buffer object
* for the current iteration.
*
@@ -552,10 +530,15 @@ class_ttm_bo_lru_cursor_lock_ptr(class_ttm_bo_lru_cursor_t *_T)
* up at looping termination, even if terminated prematurely by, for
* example a return or break statement. Exiting the loop will also unlock
* (if needed) and unreference @_bo.
+ *
+ * Return: If locking of a bo returns an error, then iteration is terminated
+ * and @_bo is set to a corresponding error pointer. It's illegal to
+ * dereference @_bo after loop exit.
*/
-#define ttm_bo_lru_for_each_reserved_guarded(_cursor, _man, _ctx, _bo) \
- scoped_guard(ttm_bo_lru_cursor, _cursor, _man, _ctx) \
- for ((_bo) = ttm_bo_lru_cursor_first(_cursor); (_bo); \
- (_bo) = ttm_bo_lru_cursor_next(_cursor))
+#define ttm_bo_lru_for_each_reserved_guarded(_cursor, _man, _arg, _bo) \
+ scoped_guard(ttm_bo_lru_cursor, _cursor, _man, _arg) \
+ for ((_bo) = ttm_bo_lru_cursor_first(_cursor); \
+ !IS_ERR_OR_NULL(_bo); \
+ (_bo) = ttm_bo_lru_cursor_next(_cursor))
#endif
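
Sketch of an LRU walk using the reworked cursor and argument structure; the per-BO shrinking decision (foo_try_shrink()) is hypothetical, the rest follows the macro documented above:

#include <linux/err.h>
#include <drm/ttm/ttm_bo.h>

static s64 foo_walk_lru(struct ttm_resource_manager *man,
			struct ttm_operation_ctx *ctx,
			struct ww_acquire_ctx *ticket)
{
	struct ttm_lru_walk_arg arg = {
		.ctx = ctx,
		.ticket = ticket,
		.trylock_only = !ticket,
	};
	struct ttm_bo_lru_cursor cursor;
	struct ttm_buffer_object *bo;
	s64 freed = 0;

	ttm_bo_lru_for_each_reserved_guarded(&cursor, man, &arg, bo) {
		/* @bo is reserved here; unlocking is handled by the cursor. */
		freed += foo_try_shrink(bo);
	}

	/* On a locking error @bo is an error pointer; don't dereference it. */
	return IS_ERR(bo) ? PTR_ERR(bo) : freed;
}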
diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h
index 39b8636b1845..592b5f802859 100644
--- a/include/drm/ttm/ttm_device.h
+++ b/include/drm/ttm/ttm_device.h
@@ -272,6 +272,7 @@ struct ttm_device {
int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags);
int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
gfp_t gfp_flags);
+int ttm_device_prepare_hibernation(struct ttm_device *bdev);
static inline struct ttm_resource_manager *
ttm_manager_type(struct ttm_device *bdev, int mem_type)
diff --git a/include/dt-bindings/clock/ast2600-clock.h b/include/dt-bindings/clock/ast2600-clock.h
index 7ae96c7bd72f..f60fff261130 100644
--- a/include/dt-bindings/clock/ast2600-clock.h
+++ b/include/dt-bindings/clock/ast2600-clock.h
@@ -122,6 +122,8 @@
#define ASPEED_RESET_PCIE_DEV_OEN 20
#define ASPEED_RESET_PCIE_RC_O 19
#define ASPEED_RESET_PCIE_RC_OEN 18
+#define ASPEED_RESET_MAC2 12
+#define ASPEED_RESET_MAC1 11
#define ASPEED_RESET_PCI_DP 5
#define ASPEED_RESET_HACE 4
#define ASPEED_RESET_AHB 1
diff --git a/include/dt-bindings/clock/nxp,imx94-clock.h b/include/dt-bindings/clock/nxp,imx94-clock.h
new file mode 100644
index 000000000000..c4ba13352b99
--- /dev/null
+++ b/include/dt-bindings/clock/nxp,imx94-clock.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright 2025 NXP
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX94_H
+#define __DT_BINDINGS_CLOCK_IMX94_H
+
+#define IMX94_CLK_DISPMIX_CLK_SEL 0
+
+#define IMX94_CLK_DISPMIX_LVDS_CLK_GATE 0
+
+#endif /* __DT_BINDINGS_CLOCK_IMX94_H */
diff --git a/include/dt-bindings/clock/qcom,ipq5018-cmn-pll.h b/include/dt-bindings/clock/qcom,ipq5018-cmn-pll.h
new file mode 100644
index 000000000000..586d1c9b33b3
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,ipq5018-cmn-pll.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_IPQ5018_CMN_PLL_H
+#define _DT_BINDINGS_CLK_QCOM_IPQ5018_CMN_PLL_H
+
+/* CMN PLL core clock. */
+#define IPQ5018_CMN_PLL_CLK 0
+
+/* The output clocks from CMN PLL of IPQ5018. */
+#define IPQ5018_XO_24MHZ_CLK 1
+#define IPQ5018_SLEEP_32KHZ_CLK 2
+#define IPQ5018_ETH_50MHZ_CLK 3
+#endif
diff --git a/include/dt-bindings/clock/qcom,ipq5424-cmn-pll.h b/include/dt-bindings/clock/qcom,ipq5424-cmn-pll.h
new file mode 100644
index 000000000000..f643c2668c04
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,ipq5424-cmn-pll.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_IPQ5424_CMN_PLL_H
+#define _DT_BINDINGS_CLK_QCOM_IPQ5424_CMN_PLL_H
+
+/* CMN PLL core clock. */
+#define IPQ5424_CMN_PLL_CLK 0
+
+/* The output clocks from CMN PLL of IPQ5424. */
+#define IPQ5424_XO_24MHZ_CLK 1
+#define IPQ5424_SLEEP_32KHZ_CLK 2
+#define IPQ5424_PCS_31P25MHZ_CLK 3
+#define IPQ5424_NSS_300MHZ_CLK 4
+#define IPQ5424_PPE_375MHZ_CLK 5
+#define IPQ5424_ETH0_50MHZ_CLK 6
+#define IPQ5424_ETH1_50MHZ_CLK 7
+#define IPQ5424_ETH2_50MHZ_CLK 8
+#define IPQ5424_ETH_25MHZ_CLK 9
+#endif
diff --git a/include/dt-bindings/clock/qcom,milos-camcc.h b/include/dt-bindings/clock/qcom,milos-camcc.h
new file mode 100644
index 000000000000..21925dca9a20
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,milos-camcc.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_MILOS_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_MILOS_H
+
+/* CAM_CC clocks */
+#define CAM_CC_PLL0 0
+#define CAM_CC_PLL0_OUT_EVEN 1
+#define CAM_CC_PLL0_OUT_ODD 2
+#define CAM_CC_PLL1 3
+#define CAM_CC_PLL1_OUT_EVEN 4
+#define CAM_CC_PLL2 5
+#define CAM_CC_PLL2_OUT_EVEN 6
+#define CAM_CC_PLL3 7
+#define CAM_CC_PLL3_OUT_EVEN 8
+#define CAM_CC_PLL4 9
+#define CAM_CC_PLL4_OUT_EVEN 10
+#define CAM_CC_PLL5 11
+#define CAM_CC_PLL5_OUT_EVEN 12
+#define CAM_CC_PLL6 13
+#define CAM_CC_PLL6_OUT_EVEN 14
+#define CAM_CC_BPS_AHB_CLK 15
+#define CAM_CC_BPS_AREG_CLK 16
+#define CAM_CC_BPS_CLK 17
+#define CAM_CC_BPS_CLK_SRC 18
+#define CAM_CC_CAMNOC_ATB_CLK 19
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 20
+#define CAM_CC_CAMNOC_AXI_HF_CLK 21
+#define CAM_CC_CAMNOC_AXI_SF_CLK 22
+#define CAM_CC_CAMNOC_NRT_AXI_CLK 23
+#define CAM_CC_CAMNOC_RT_AXI_CLK 24
+#define CAM_CC_CCI_0_CLK 25
+#define CAM_CC_CCI_0_CLK_SRC 26
+#define CAM_CC_CCI_1_CLK 27
+#define CAM_CC_CCI_1_CLK_SRC 28
+#define CAM_CC_CORE_AHB_CLK 29
+#define CAM_CC_CPAS_AHB_CLK 30
+#define CAM_CC_CPHY_RX_CLK_SRC 31
+#define CAM_CC_CRE_AHB_CLK 32
+#define CAM_CC_CRE_CLK 33
+#define CAM_CC_CRE_CLK_SRC 34
+#define CAM_CC_CSI0PHYTIMER_CLK 35
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 36
+#define CAM_CC_CSI1PHYTIMER_CLK 37
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 38
+#define CAM_CC_CSI2PHYTIMER_CLK 39
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 40
+#define CAM_CC_CSI3PHYTIMER_CLK 41
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 42
+#define CAM_CC_CSIPHY0_CLK 43
+#define CAM_CC_CSIPHY1_CLK 44
+#define CAM_CC_CSIPHY2_CLK 45
+#define CAM_CC_CSIPHY3_CLK 46
+#define CAM_CC_FAST_AHB_CLK_SRC 47
+#define CAM_CC_GDSC_CLK 48
+#define CAM_CC_ICP_ATB_CLK 49
+#define CAM_CC_ICP_CLK 50
+#define CAM_CC_ICP_CLK_SRC 51
+#define CAM_CC_ICP_CTI_CLK 52
+#define CAM_CC_ICP_TS_CLK 53
+#define CAM_CC_MCLK0_CLK 54
+#define CAM_CC_MCLK0_CLK_SRC 55
+#define CAM_CC_MCLK1_CLK 56
+#define CAM_CC_MCLK1_CLK_SRC 57
+#define CAM_CC_MCLK2_CLK 58
+#define CAM_CC_MCLK2_CLK_SRC 59
+#define CAM_CC_MCLK3_CLK 60
+#define CAM_CC_MCLK3_CLK_SRC 61
+#define CAM_CC_MCLK4_CLK 62
+#define CAM_CC_MCLK4_CLK_SRC 63
+#define CAM_CC_OPE_0_AHB_CLK 64
+#define CAM_CC_OPE_0_AREG_CLK 65
+#define CAM_CC_OPE_0_CLK 66
+#define CAM_CC_OPE_0_CLK_SRC 67
+#define CAM_CC_SLEEP_CLK 68
+#define CAM_CC_SLEEP_CLK_SRC 69
+#define CAM_CC_SLOW_AHB_CLK_SRC 70
+#define CAM_CC_SOC_AHB_CLK 71
+#define CAM_CC_SYS_TMR_CLK 72
+#define CAM_CC_TFE_0_AHB_CLK 73
+#define CAM_CC_TFE_0_CLK 74
+#define CAM_CC_TFE_0_CLK_SRC 75
+#define CAM_CC_TFE_0_CPHY_RX_CLK 76
+#define CAM_CC_TFE_0_CSID_CLK 77
+#define CAM_CC_TFE_0_CSID_CLK_SRC 78
+#define CAM_CC_TFE_1_AHB_CLK 79
+#define CAM_CC_TFE_1_CLK 80
+#define CAM_CC_TFE_1_CLK_SRC 81
+#define CAM_CC_TFE_1_CPHY_RX_CLK 82
+#define CAM_CC_TFE_1_CSID_CLK 83
+#define CAM_CC_TFE_1_CSID_CLK_SRC 84
+#define CAM_CC_TFE_2_AHB_CLK 85
+#define CAM_CC_TFE_2_CLK 86
+#define CAM_CC_TFE_2_CLK_SRC 87
+#define CAM_CC_TFE_2_CPHY_RX_CLK 88
+#define CAM_CC_TFE_2_CSID_CLK 89
+#define CAM_CC_TFE_2_CSID_CLK_SRC 90
+#define CAM_CC_TOP_SHIFT_CLK 91
+#define CAM_CC_XO_CLK_SRC 92
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_CAMNOC_BCR 1
+#define CAM_CC_CAMSS_TOP_BCR 2
+#define CAM_CC_CCI_0_BCR 3
+#define CAM_CC_CCI_1_BCR 4
+#define CAM_CC_CPAS_BCR 5
+#define CAM_CC_CRE_BCR 6
+#define CAM_CC_CSI0PHY_BCR 7
+#define CAM_CC_CSI1PHY_BCR 8
+#define CAM_CC_CSI2PHY_BCR 9
+#define CAM_CC_CSI3PHY_BCR 10
+#define CAM_CC_ICP_BCR 11
+#define CAM_CC_MCLK0_BCR 12
+#define CAM_CC_MCLK1_BCR 13
+#define CAM_CC_MCLK2_BCR 14
+#define CAM_CC_MCLK3_BCR 15
+#define CAM_CC_MCLK4_BCR 16
+#define CAM_CC_OPE_0_BCR 17
+#define CAM_CC_TFE_0_BCR 18
+#define CAM_CC_TFE_1_BCR 19
+#define CAM_CC_TFE_2_BCR 20
+
+/* CAM_CC power domains */
+#define CAM_CC_CAMSS_TOP_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,milos-dispcc.h b/include/dt-bindings/clock/qcom,milos-dispcc.h
new file mode 100644
index 000000000000..c70f23f32f0a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,milos-dispcc.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_MILOS_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_MILOS_H
+
+/* DISP_CC clocks */
+#define DISP_CC_PLL0 0
+#define DISP_CC_MDSS_ACCU_CLK 1
+#define DISP_CC_MDSS_AHB1_CLK 2
+#define DISP_CC_MDSS_AHB_CLK 3
+#define DISP_CC_MDSS_AHB_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_CLK 5
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 6
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 7
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 8
+#define DISP_CC_MDSS_DPTX0_AUX_CLK 9
+#define DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 10
+#define DISP_CC_MDSS_DPTX0_CRYPTO_CLK 11
+#define DISP_CC_MDSS_DPTX0_LINK_CLK 12
+#define DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 13
+#define DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 14
+#define DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 15
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK 16
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 17
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK 18
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 19
+#define DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 20
+#define DISP_CC_MDSS_ESC0_CLK 21
+#define DISP_CC_MDSS_ESC0_CLK_SRC 22
+#define DISP_CC_MDSS_MDP1_CLK 23
+#define DISP_CC_MDSS_MDP_CLK 24
+#define DISP_CC_MDSS_MDP_CLK_SRC 25
+#define DISP_CC_MDSS_MDP_LUT1_CLK 26
+#define DISP_CC_MDSS_MDP_LUT_CLK 27
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 28
+#define DISP_CC_MDSS_PCLK0_CLK 29
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 30
+#define DISP_CC_MDSS_RSCC_AHB_CLK 31
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 32
+#define DISP_CC_MDSS_VSYNC1_CLK 33
+#define DISP_CC_MDSS_VSYNC_CLK 34
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 35
+#define DISP_CC_SLEEP_CLK 36
+#define DISP_CC_SLEEP_CLK_SRC 37
+#define DISP_CC_XO_CLK 38
+#define DISP_CC_XO_CLK_SRC 39
+
+/* DISP_CC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_CORE_INT2_BCR 1
+#define DISP_CC_MDSS_RSCC_BCR 2
+
+/* DISP_CC power domains */
+#define DISP_CC_MDSS_CORE_GDSC 0
+#define DISP_CC_MDSS_CORE_INT2_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,milos-gcc.h b/include/dt-bindings/clock/qcom,milos-gcc.h
new file mode 100644
index 000000000000..a530ca39e1ef
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,milos-gcc.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_MILOS_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_MILOS_H
+
+/* GCC clocks */
+#define GCC_GPLL0 0
+#define GCC_GPLL0_OUT_EVEN 1
+#define GCC_GPLL2 2
+#define GCC_GPLL4 3
+#define GCC_GPLL6 4
+#define GCC_GPLL7 5
+#define GCC_GPLL9 6
+#define GCC_AGGRE_NOC_PCIE_AXI_CLK 7
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 8
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 9
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 10
+#define GCC_BOOT_ROM_AHB_CLK 11
+#define GCC_CAMERA_AHB_CLK 12
+#define GCC_CAMERA_HF_AXI_CLK 13
+#define GCC_CAMERA_HF_XO_CLK 14
+#define GCC_CAMERA_SF_AXI_CLK 15
+#define GCC_CAMERA_SF_XO_CLK 16
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 17
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 18
+#define GCC_CNOC_PCIE_SF_AXI_CLK 19
+#define GCC_DDRSS_GPU_AXI_CLK 20
+#define GCC_DDRSS_PCIE_SF_QTB_CLK 21
+#define GCC_DISP_AHB_CLK 22
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 23
+#define GCC_DISP_HF_AXI_CLK 24
+#define GCC_DISP_XO_CLK 25
+#define GCC_GP1_CLK 26
+#define GCC_GP1_CLK_SRC 27
+#define GCC_GP2_CLK 28
+#define GCC_GP2_CLK_SRC 29
+#define GCC_GP3_CLK 30
+#define GCC_GP3_CLK_SRC 31
+#define GCC_GPU_CFG_AHB_CLK 32
+#define GCC_GPU_GPLL0_CLK_SRC 33
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 34
+#define GCC_GPU_MEMNOC_GFX_CLK 35
+#define GCC_GPU_SNOC_DVM_GFX_CLK 36
+#define GCC_PCIE_0_AUX_CLK 37
+#define GCC_PCIE_0_AUX_CLK_SRC 38
+#define GCC_PCIE_0_CFG_AHB_CLK 39
+#define GCC_PCIE_0_MSTR_AXI_CLK 40
+#define GCC_PCIE_0_PHY_RCHNG_CLK 41
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 42
+#define GCC_PCIE_0_PIPE_CLK 43
+#define GCC_PCIE_0_PIPE_CLK_SRC 44
+#define GCC_PCIE_0_PIPE_DIV2_CLK 45
+#define GCC_PCIE_0_PIPE_DIV2_CLK_SRC 46
+#define GCC_PCIE_0_SLV_AXI_CLK 47
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 48
+#define GCC_PCIE_1_AUX_CLK 49
+#define GCC_PCIE_1_AUX_CLK_SRC 50
+#define GCC_PCIE_1_CFG_AHB_CLK 51
+#define GCC_PCIE_1_MSTR_AXI_CLK 52
+#define GCC_PCIE_1_PHY_RCHNG_CLK 53
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 54
+#define GCC_PCIE_1_PIPE_CLK 55
+#define GCC_PCIE_1_PIPE_CLK_SRC 56
+#define GCC_PCIE_1_PIPE_DIV2_CLK 57
+#define GCC_PCIE_1_PIPE_DIV2_CLK_SRC 58
+#define GCC_PCIE_1_SLV_AXI_CLK 59
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 60
+#define GCC_PCIE_RSCC_CFG_AHB_CLK 61
+#define GCC_PCIE_RSCC_XO_CLK 62
+#define GCC_PDM2_CLK 63
+#define GCC_PDM2_CLK_SRC 64
+#define GCC_PDM_AHB_CLK 65
+#define GCC_PDM_XO4_CLK 66
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 67
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 68
+#define GCC_QMIP_DISP_AHB_CLK 69
+#define GCC_QMIP_GPU_AHB_CLK 70
+#define GCC_QMIP_PCIE_AHB_CLK 71
+#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 72
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 73
+#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 74
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 75
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 76
+#define GCC_QUPV3_WRAP0_CORE_CLK 77
+#define GCC_QUPV3_WRAP0_QSPI_REF_CLK 78
+#define GCC_QUPV3_WRAP0_QSPI_REF_CLK_SRC 79
+#define GCC_QUPV3_WRAP0_S0_CLK 80
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 81
+#define GCC_QUPV3_WRAP0_S1_CLK 82
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 83
+#define GCC_QUPV3_WRAP0_S2_CLK 84
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 85
+#define GCC_QUPV3_WRAP0_S3_CLK 86
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 87
+#define GCC_QUPV3_WRAP0_S4_CLK 88
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 89
+#define GCC_QUPV3_WRAP0_S5_CLK 90
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 91
+#define GCC_QUPV3_WRAP0_S6_CLK 92
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 93
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 94
+#define GCC_QUPV3_WRAP1_CORE_CLK 95
+#define GCC_QUPV3_WRAP1_QSPI_REF_CLK 96
+#define GCC_QUPV3_WRAP1_QSPI_REF_CLK_SRC 97
+#define GCC_QUPV3_WRAP1_S0_CLK 98
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 99
+#define GCC_QUPV3_WRAP1_S1_CLK 100
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 101
+#define GCC_QUPV3_WRAP1_S2_CLK 102
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 103
+#define GCC_QUPV3_WRAP1_S3_CLK 104
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 105
+#define GCC_QUPV3_WRAP1_S4_CLK 106
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 107
+#define GCC_QUPV3_WRAP1_S5_CLK 108
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 109
+#define GCC_QUPV3_WRAP1_S6_CLK 110
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 111
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 112
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 113
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 114
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 115
+#define GCC_SDCC1_AHB_CLK 116
+#define GCC_SDCC1_APPS_CLK 117
+#define GCC_SDCC1_APPS_CLK_SRC 118
+#define GCC_SDCC1_ICE_CORE_CLK 119
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 120
+#define GCC_SDCC2_AHB_CLK 121
+#define GCC_SDCC2_APPS_CLK 122
+#define GCC_SDCC2_APPS_CLK_SRC 123
+#define GCC_UFS_PHY_AHB_CLK 124
+#define GCC_UFS_PHY_AXI_CLK 125
+#define GCC_UFS_PHY_AXI_CLK_SRC 126
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 127
+#define GCC_UFS_PHY_ICE_CORE_CLK 128
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 129
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 130
+#define GCC_UFS_PHY_PHY_AUX_CLK 131
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 132
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 133
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 134
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 135
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 136
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 137
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 138
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 139
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 140
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 141
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 142
+#define GCC_USB30_PRIM_ATB_CLK 143
+#define GCC_USB30_PRIM_MASTER_CLK 144
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 145
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 146
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 147
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 148
+#define GCC_USB30_PRIM_SLEEP_CLK 149
+#define GCC_USB3_PRIM_PHY_AUX_CLK 150
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 151
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 152
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 153
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 154
+#define GCC_VIDEO_AHB_CLK 155
+#define GCC_VIDEO_AXI0_CLK 156
+#define GCC_VIDEO_XO_CLK 157
+
+/* GCC resets */
+#define GCC_CAMERA_BCR 0
+#define GCC_DISPLAY_BCR 1
+#define GCC_GPU_BCR 2
+#define GCC_PCIE_0_BCR 3
+#define GCC_PCIE_0_LINK_DOWN_BCR 4
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_0_PHY_BCR 6
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 7
+#define GCC_PCIE_1_BCR 8
+#define GCC_PCIE_1_LINK_DOWN_BCR 9
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 10
+#define GCC_PCIE_1_PHY_BCR 11
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 12
+#define GCC_PCIE_RSCC_BCR 13
+#define GCC_PDM_BCR 14
+#define GCC_QUPV3_WRAPPER_0_BCR 15
+#define GCC_QUPV3_WRAPPER_1_BCR 16
+#define GCC_QUSB2PHY_PRIM_BCR 17
+#define GCC_QUSB2PHY_SEC_BCR 18
+#define GCC_SDCC1_BCR 19
+#define GCC_SDCC2_BCR 20
+#define GCC_UFS_PHY_BCR 21
+#define GCC_USB30_PRIM_BCR 22
+#define GCC_USB3_DP_PHY_PRIM_BCR 23
+#define GCC_USB3_PHY_PRIM_BCR 24
+#define GCC_USB3PHY_PHY_PRIM_BCR 25
+#define GCC_VIDEO_AXI0_CLK_ARES 26
+#define GCC_VIDEO_BCR 27
+
+/* GCC power domains */
+#define PCIE_0_GDSC 0
+#define PCIE_0_PHY_GDSC 1
+#define PCIE_1_GDSC 2
+#define PCIE_1_PHY_GDSC 3
+#define UFS_PHY_GDSC 4
+#define UFS_MEM_PHY_GDSC 5
+#define USB30_PRIM_GDSC 6
+#define USB3_PHY_GDSC 7
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,milos-gpucc.h b/include/dt-bindings/clock/qcom,milos-gpucc.h
new file mode 100644
index 000000000000..6ff1925d409f
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,milos-gpucc.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_MILOS_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_MILOS_H
+
+/* GPU_CC clocks */
+#define GPU_CC_PLL0 0
+#define GPU_CC_PLL0_OUT_EVEN 1
+#define GPU_CC_AHB_CLK 2
+#define GPU_CC_CB_CLK 3
+#define GPU_CC_CX_ACCU_SHIFT_CLK 4
+#define GPU_CC_CX_FF_CLK 5
+#define GPU_CC_CX_GMU_CLK 6
+#define GPU_CC_CXO_AON_CLK 7
+#define GPU_CC_CXO_CLK 8
+#define GPU_CC_DEMET_CLK 9
+#define GPU_CC_DEMET_DIV_CLK_SRC 10
+#define GPU_CC_DPM_CLK 11
+#define GPU_CC_FF_CLK_SRC 12
+#define GPU_CC_FREQ_MEASURE_CLK 13
+#define GPU_CC_GMU_CLK_SRC 14
+#define GPU_CC_GX_ACCU_SHIFT_CLK 15
+#define GPU_CC_GX_ACD_AHB_FF_CLK 16
+#define GPU_CC_GX_AHB_FF_CLK 17
+#define GPU_CC_GX_GMU_CLK 18
+#define GPU_CC_GX_RCG_AHB_FF_CLK 19
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 20
+#define GPU_CC_HUB_AON_CLK 21
+#define GPU_CC_HUB_CLK_SRC 22
+#define GPU_CC_HUB_CX_INT_CLK 23
+#define GPU_CC_HUB_DIV_CLK_SRC 24
+#define GPU_CC_MEMNOC_GFX_CLK 25
+#define GPU_CC_RSCC_HUB_AON_CLK 26
+#define GPU_CC_RSCC_XO_AON_CLK 27
+#define GPU_CC_SLEEP_CLK 28
+#define GPU_CC_XO_CLK_SRC 29
+#define GPU_CC_XO_DIV_CLK_SRC 30
+
+/* GPU_CC resets */
+#define GPU_CC_CB_BCR 0
+#define GPU_CC_CX_BCR 1
+#define GPU_CC_FAST_HUB_BCR 2
+#define GPU_CC_FF_BCR 3
+#define GPU_CC_GMU_BCR 4
+#define GPU_CC_GX_BCR 5
+#define GPU_CC_RBCPR_BCR 6
+#define GPU_CC_XO_BCR 7
+
+/* GPU_CC power domains */
+#define GPU_CC_CX_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,milos-videocc.h b/include/dt-bindings/clock/qcom,milos-videocc.h
new file mode 100644
index 000000000000..3544db81ffae
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,milos-videocc.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_MILOS_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_MILOS_H
+
+/* VIDEO_CC clocks */
+#define VIDEO_CC_PLL0 0
+#define VIDEO_CC_AHB_CLK 1
+#define VIDEO_CC_AHB_CLK_SRC 2
+#define VIDEO_CC_MVS0_CLK 3
+#define VIDEO_CC_MVS0_CLK_SRC 4
+#define VIDEO_CC_MVS0_DIV_CLK_SRC 5
+#define VIDEO_CC_MVS0_SHIFT_CLK 6
+#define VIDEO_CC_MVS0C_CLK 7
+#define VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC 8
+#define VIDEO_CC_MVS0C_SHIFT_CLK 9
+#define VIDEO_CC_SLEEP_CLK 10
+#define VIDEO_CC_SLEEP_CLK_SRC 11
+#define VIDEO_CC_XO_CLK 12
+#define VIDEO_CC_XO_CLK_SRC 13
+
+/* VIDEO_CC resets */
+#define VIDEO_CC_INTERFACE_BCR 0
+#define VIDEO_CC_MVS0_BCR 1
+#define VIDEO_CC_MVS0C_CLK_ARES 2
+#define VIDEO_CC_MVS0C_BCR 3
+
+/* VIDEO_CC power domains */
+#define VIDEO_CC_MVS0_GDSC 0
+#define VIDEO_CC_MVS0C_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,qcs615-camcc.h b/include/dt-bindings/clock/qcom,qcs615-camcc.h
new file mode 100644
index 000000000000..aec57dddc067
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qcs615-camcc.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_QCS615_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_QCS615_H
+
+/* CAM_CC clocks */
+#define CAM_CC_BPS_AHB_CLK 0
+#define CAM_CC_BPS_AREG_CLK 1
+#define CAM_CC_BPS_AXI_CLK 2
+#define CAM_CC_BPS_CLK 3
+#define CAM_CC_BPS_CLK_SRC 4
+#define CAM_CC_CAMNOC_ATB_CLK 5
+#define CAM_CC_CAMNOC_AXI_CLK 6
+#define CAM_CC_CCI_CLK 7
+#define CAM_CC_CCI_CLK_SRC 8
+#define CAM_CC_CORE_AHB_CLK 9
+#define CAM_CC_CPAS_AHB_CLK 10
+#define CAM_CC_CPHY_RX_CLK_SRC 11
+#define CAM_CC_CSI0PHYTIMER_CLK 12
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 13
+#define CAM_CC_CSI1PHYTIMER_CLK 14
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 15
+#define CAM_CC_CSI2PHYTIMER_CLK 16
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 17
+#define CAM_CC_CSIPHY0_CLK 18
+#define CAM_CC_CSIPHY1_CLK 19
+#define CAM_CC_CSIPHY2_CLK 20
+#define CAM_CC_FAST_AHB_CLK_SRC 21
+#define CAM_CC_ICP_ATB_CLK 22
+#define CAM_CC_ICP_CLK 23
+#define CAM_CC_ICP_CLK_SRC 24
+#define CAM_CC_ICP_CTI_CLK 25
+#define CAM_CC_ICP_TS_CLK 26
+#define CAM_CC_IFE_0_AXI_CLK 27
+#define CAM_CC_IFE_0_CLK 28
+#define CAM_CC_IFE_0_CLK_SRC 29
+#define CAM_CC_IFE_0_CPHY_RX_CLK 30
+#define CAM_CC_IFE_0_CSID_CLK 31
+#define CAM_CC_IFE_0_CSID_CLK_SRC 32
+#define CAM_CC_IFE_0_DSP_CLK 33
+#define CAM_CC_IFE_1_AXI_CLK 34
+#define CAM_CC_IFE_1_CLK 35
+#define CAM_CC_IFE_1_CLK_SRC 36
+#define CAM_CC_IFE_1_CPHY_RX_CLK 37
+#define CAM_CC_IFE_1_CSID_CLK 38
+#define CAM_CC_IFE_1_CSID_CLK_SRC 39
+#define CAM_CC_IFE_1_DSP_CLK 40
+#define CAM_CC_IFE_LITE_CLK 41
+#define CAM_CC_IFE_LITE_CLK_SRC 42
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK 43
+#define CAM_CC_IFE_LITE_CSID_CLK 44
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC 45
+#define CAM_CC_IPE_0_AHB_CLK 46
+#define CAM_CC_IPE_0_AREG_CLK 47
+#define CAM_CC_IPE_0_AXI_CLK 48
+#define CAM_CC_IPE_0_CLK 49
+#define CAM_CC_IPE_0_CLK_SRC 50
+#define CAM_CC_JPEG_CLK 51
+#define CAM_CC_JPEG_CLK_SRC 52
+#define CAM_CC_LRME_CLK 53
+#define CAM_CC_LRME_CLK_SRC 54
+#define CAM_CC_MCLK0_CLK 55
+#define CAM_CC_MCLK0_CLK_SRC 56
+#define CAM_CC_MCLK1_CLK 57
+#define CAM_CC_MCLK1_CLK_SRC 58
+#define CAM_CC_MCLK2_CLK 59
+#define CAM_CC_MCLK2_CLK_SRC 60
+#define CAM_CC_MCLK3_CLK 61
+#define CAM_CC_MCLK3_CLK_SRC 62
+#define CAM_CC_PLL0 63
+#define CAM_CC_PLL1 64
+#define CAM_CC_PLL2 65
+#define CAM_CC_PLL2_OUT_AUX2 66
+#define CAM_CC_PLL3 67
+#define CAM_CC_SLOW_AHB_CLK_SRC 68
+#define CAM_CC_SOC_AHB_CLK 69
+#define CAM_CC_SYS_TMR_CLK 70
+
+/* CAM_CC power domains */
+#define BPS_GDSC 0
+#define IFE_0_GDSC 1
+#define IFE_1_GDSC 2
+#define IPE_0_GDSC 3
+#define TITAN_TOP_GDSC 4
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_CAMNOC_BCR 1
+#define CAM_CC_CCI_BCR 2
+#define CAM_CC_CPAS_BCR 3
+#define CAM_CC_CSI0PHY_BCR 4
+#define CAM_CC_CSI1PHY_BCR 5
+#define CAM_CC_CSI2PHY_BCR 6
+#define CAM_CC_ICP_BCR 7
+#define CAM_CC_IFE_0_BCR 8
+#define CAM_CC_IFE_1_BCR 9
+#define CAM_CC_IFE_LITE_BCR 10
+#define CAM_CC_IPE_0_BCR 11
+#define CAM_CC_JPEG_BCR 12
+#define CAM_CC_LRME_BCR 13
+#define CAM_CC_MCLK0_BCR 14
+#define CAM_CC_MCLK1_BCR 15
+#define CAM_CC_MCLK2_BCR 16
+#define CAM_CC_MCLK3_BCR 17
+#define CAM_CC_TITAN_TOP_BCR 18
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,qcs615-dispcc.h b/include/dt-bindings/clock/qcom,qcs615-dispcc.h
new file mode 100644
index 000000000000..9a29945c5762
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qcs615-dispcc.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_QCS615_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_QCS615_H
+
+/* DISP_CC clocks */
+#define DISP_CC_MDSS_AHB_CLK 0
+#define DISP_CC_MDSS_AHB_CLK_SRC 1
+#define DISP_CC_MDSS_BYTE0_CLK 2
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 3
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 5
+#define DISP_CC_MDSS_DP_AUX_CLK 6
+#define DISP_CC_MDSS_DP_AUX_CLK_SRC 7
+#define DISP_CC_MDSS_DP_CRYPTO_CLK 8
+#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC 9
+#define DISP_CC_MDSS_DP_LINK_CLK 10
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC 11
+#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC 12
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK 13
+#define DISP_CC_MDSS_DP_PIXEL1_CLK 14
+#define DISP_CC_MDSS_DP_PIXEL1_CLK_SRC 15
+#define DISP_CC_MDSS_DP_PIXEL_CLK 16
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC 17
+#define DISP_CC_MDSS_ESC0_CLK 18
+#define DISP_CC_MDSS_ESC0_CLK_SRC 19
+#define DISP_CC_MDSS_MDP_CLK 20
+#define DISP_CC_MDSS_MDP_CLK_SRC 21
+#define DISP_CC_MDSS_MDP_LUT_CLK 22
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 23
+#define DISP_CC_MDSS_PCLK0_CLK 24
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 25
+#define DISP_CC_MDSS_ROT_CLK 26
+#define DISP_CC_MDSS_ROT_CLK_SRC 27
+#define DISP_CC_MDSS_RSCC_AHB_CLK 28
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 29
+#define DISP_CC_MDSS_VSYNC_CLK 30
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 31
+#define DISP_CC_PLL0 32
+#define DISP_CC_XO_CLK 33
+
+/* DISP_CC power domains */
+#define MDSS_CORE_GDSC 0
+
+/* DISP_CC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_RSCC_BCR 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,qcs615-gpucc.h b/include/dt-bindings/clock/qcom,qcs615-gpucc.h
new file mode 100644
index 000000000000..6d8394b90d59
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qcs615-gpucc.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_QCS615_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_QCS615_H
+
+/* GPU_CC clocks */
+#define CRC_DIV_PLL0 0
+#define CRC_DIV_PLL1 1
+#define GPU_CC_PLL0 2
+#define GPU_CC_PLL1 3
+#define GPU_CC_CRC_AHB_CLK 4
+#define GPU_CC_CX_GFX3D_CLK 5
+#define GPU_CC_CX_GFX3D_SLV_CLK 6
+#define GPU_CC_CX_GMU_CLK 7
+#define GPU_CC_CX_SNOC_DVM_CLK 8
+#define GPU_CC_CXO_AON_CLK 9
+#define GPU_CC_CXO_CLK 10
+#define GPU_CC_GMU_CLK_SRC 11
+#define GPU_CC_GX_GFX3D_CLK 12
+#define GPU_CC_GX_GFX3D_CLK_SRC 13
+#define GPU_CC_GX_GMU_CLK 14
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 15
+#define GPU_CC_SLEEP_CLK 16
+
+/* GPU_CC power domains */
+#define CX_GDSC 0
+#define GX_GDSC 1
+
+/* GPU_CC resets */
+#define GPU_CC_CX_BCR 0
+#define GPU_CC_GFX3D_AON_BCR 1
+#define GPU_CC_GMU_BCR 2
+#define GPU_CC_GX_BCR 3
+#define GPU_CC_XO_BCR 4
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,qcs615-videocc.h b/include/dt-bindings/clock/qcom,qcs615-videocc.h
new file mode 100644
index 000000000000..0ca3efb21103
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qcs615-videocc.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_QCS615_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_QCS615_H
+
+/* VIDEO_CC clocks */
+#define VIDEO_CC_SLEEP_CLK 0
+#define VIDEO_CC_SLEEP_CLK_SRC 1
+#define VIDEO_CC_VCODEC0_AXI_CLK 2
+#define VIDEO_CC_VCODEC0_CORE_CLK 3
+#define VIDEO_CC_VENUS_AHB_CLK 4
+#define VIDEO_CC_VENUS_CLK_SRC 5
+#define VIDEO_CC_VENUS_CTL_AXI_CLK 6
+#define VIDEO_CC_VENUS_CTL_CORE_CLK 7
+#define VIDEO_CC_XO_CLK 8
+#define VIDEO_PLL0 9
+
+/* VIDEO_CC power domains */
+#define VCODEC0_GDSC 0
+#define VENUS_GDSC 1
+
+/* VIDEO_CC resets */
+#define VIDEO_CC_INTERFACE_BCR 0
+#define VIDEO_CC_VCODEC0_BCR 1
+#define VIDEO_CC_VENUS_BCR 2
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,x1e80100-gcc.h b/include/dt-bindings/clock/qcom,x1e80100-gcc.h
index 24ba9e2a5cf6..710c340f24a5 100644
--- a/include/dt-bindings/clock/qcom,x1e80100-gcc.h
+++ b/include/dt-bindings/clock/qcom,x1e80100-gcc.h
@@ -482,4 +482,6 @@
#define GCC_USB_1_PHY_BCR 85
#define GCC_USB_2_PHY_BCR 86
#define GCC_VIDEO_BCR 87
+#define GCC_VIDEO_AXI0_CLK_ARES 88
+#define GCC_VIDEO_AXI1_CLK_ARES 89
#endif
diff --git a/include/dt-bindings/clock/r9a07g043-cpg.h b/include/dt-bindings/clock/r9a07g043-cpg.h
index 131993343777..e1f65f1928cf 100644
--- a/include/dt-bindings/clock/r9a07g043-cpg.h
+++ b/include/dt-bindings/clock/r9a07g043-cpg.h
@@ -200,57 +200,4 @@
#define R9A07G043_AX45MP_CORE0_RESETN 78 /* RZ/Five Only */
#define R9A07G043_IAX45_RESETN 79 /* RZ/Five Only */
-/* Power domain IDs. */
-#define R9A07G043_PD_ALWAYS_ON 0
-#define R9A07G043_PD_GIC 1 /* RZ/G2UL Only */
-#define R9A07G043_PD_IA55 2 /* RZ/G2UL Only */
-#define R9A07G043_PD_MHU 3 /* RZ/G2UL Only */
-#define R9A07G043_PD_CORESIGHT 4 /* RZ/G2UL Only */
-#define R9A07G043_PD_SYC 5 /* RZ/G2UL Only */
-#define R9A07G043_PD_DMAC 6
-#define R9A07G043_PD_GTM0 7
-#define R9A07G043_PD_GTM1 8
-#define R9A07G043_PD_GTM2 9
-#define R9A07G043_PD_MTU 10
-#define R9A07G043_PD_POE3 11
-#define R9A07G043_PD_WDT0 12
-#define R9A07G043_PD_SPI 13
-#define R9A07G043_PD_SDHI0 14
-#define R9A07G043_PD_SDHI1 15
-#define R9A07G043_PD_ISU 16 /* RZ/G2UL Only */
-#define R9A07G043_PD_CRU 17 /* RZ/G2UL Only */
-#define R9A07G043_PD_LCDC 18 /* RZ/G2UL Only */
-#define R9A07G043_PD_SSI0 19
-#define R9A07G043_PD_SSI1 20
-#define R9A07G043_PD_SSI2 21
-#define R9A07G043_PD_SSI3 22
-#define R9A07G043_PD_SRC 23
-#define R9A07G043_PD_USB0 24
-#define R9A07G043_PD_USB1 25
-#define R9A07G043_PD_USB_PHY 26
-#define R9A07G043_PD_ETHER0 27
-#define R9A07G043_PD_ETHER1 28
-#define R9A07G043_PD_I2C0 29
-#define R9A07G043_PD_I2C1 30
-#define R9A07G043_PD_I2C2 31
-#define R9A07G043_PD_I2C3 32
-#define R9A07G043_PD_SCIF0 33
-#define R9A07G043_PD_SCIF1 34
-#define R9A07G043_PD_SCIF2 35
-#define R9A07G043_PD_SCIF3 36
-#define R9A07G043_PD_SCIF4 37
-#define R9A07G043_PD_SCI0 38
-#define R9A07G043_PD_SCI1 39
-#define R9A07G043_PD_IRDA 40
-#define R9A07G043_PD_RSPI0 41
-#define R9A07G043_PD_RSPI1 42
-#define R9A07G043_PD_RSPI2 43
-#define R9A07G043_PD_CANFD 44
-#define R9A07G043_PD_ADC 45
-#define R9A07G043_PD_TSU 46
-#define R9A07G043_PD_PLIC 47 /* RZ/Five Only */
-#define R9A07G043_PD_IAX45 48 /* RZ/Five Only */
-#define R9A07G043_PD_NCEPLDM 49 /* RZ/Five Only */
-#define R9A07G043_PD_NCEPLMT 50 /* RZ/Five Only */
-
#endif /* __DT_BINDINGS_CLOCK_R9A07G043_CPG_H__ */
diff --git a/include/dt-bindings/clock/r9a07g044-cpg.h b/include/dt-bindings/clock/r9a07g044-cpg.h
index e209f96f92b7..0bb17ff1a01a 100644
--- a/include/dt-bindings/clock/r9a07g044-cpg.h
+++ b/include/dt-bindings/clock/r9a07g044-cpg.h
@@ -217,62 +217,4 @@
#define R9A07G044_ADC_ADRST_N 82
#define R9A07G044_TSU_PRESETN 83
-/* Power domain IDs. */
-#define R9A07G044_PD_ALWAYS_ON 0
-#define R9A07G044_PD_GIC 1
-#define R9A07G044_PD_IA55 2
-#define R9A07G044_PD_MHU 3
-#define R9A07G044_PD_CORESIGHT 4
-#define R9A07G044_PD_SYC 5
-#define R9A07G044_PD_DMAC 6
-#define R9A07G044_PD_GTM0 7
-#define R9A07G044_PD_GTM1 8
-#define R9A07G044_PD_GTM2 9
-#define R9A07G044_PD_MTU 10
-#define R9A07G044_PD_POE3 11
-#define R9A07G044_PD_GPT 12
-#define R9A07G044_PD_POEGA 13
-#define R9A07G044_PD_POEGB 14
-#define R9A07G044_PD_POEGC 15
-#define R9A07G044_PD_POEGD 16
-#define R9A07G044_PD_WDT0 17
-#define R9A07G044_PD_WDT1 18
-#define R9A07G044_PD_SPI 19
-#define R9A07G044_PD_SDHI0 20
-#define R9A07G044_PD_SDHI1 21
-#define R9A07G044_PD_3DGE 22
-#define R9A07G044_PD_ISU 23
-#define R9A07G044_PD_VCPL4 24
-#define R9A07G044_PD_CRU 25
-#define R9A07G044_PD_MIPI_DSI 26
-#define R9A07G044_PD_LCDC 27
-#define R9A07G044_PD_SSI0 28
-#define R9A07G044_PD_SSI1 29
-#define R9A07G044_PD_SSI2 30
-#define R9A07G044_PD_SSI3 31
-#define R9A07G044_PD_SRC 32
-#define R9A07G044_PD_USB0 33
-#define R9A07G044_PD_USB1 34
-#define R9A07G044_PD_USB_PHY 35
-#define R9A07G044_PD_ETHER0 36
-#define R9A07G044_PD_ETHER1 37
-#define R9A07G044_PD_I2C0 38
-#define R9A07G044_PD_I2C1 39
-#define R9A07G044_PD_I2C2 40
-#define R9A07G044_PD_I2C3 41
-#define R9A07G044_PD_SCIF0 42
-#define R9A07G044_PD_SCIF1 43
-#define R9A07G044_PD_SCIF2 44
-#define R9A07G044_PD_SCIF3 45
-#define R9A07G044_PD_SCIF4 46
-#define R9A07G044_PD_SCI0 47
-#define R9A07G044_PD_SCI1 48
-#define R9A07G044_PD_IRDA 49
-#define R9A07G044_PD_RSPI0 50
-#define R9A07G044_PD_RSPI1 51
-#define R9A07G044_PD_RSPI2 52
-#define R9A07G044_PD_CANFD 53
-#define R9A07G044_PD_ADC 54
-#define R9A07G044_PD_TSU 55
-
#endif /* __DT_BINDINGS_CLOCK_R9A07G044_CPG_H__ */
diff --git a/include/dt-bindings/clock/r9a07g054-cpg.h b/include/dt-bindings/clock/r9a07g054-cpg.h
index 2c99f89397c4..43f4dbda872c 100644
--- a/include/dt-bindings/clock/r9a07g054-cpg.h
+++ b/include/dt-bindings/clock/r9a07g054-cpg.h
@@ -226,62 +226,4 @@
#define R9A07G054_TSU_PRESETN 83
#define R9A07G054_STPAI_ARESETN 84
-/* Power domain IDs. */
-#define R9A07G054_PD_ALWAYS_ON 0
-#define R9A07G054_PD_GIC 1
-#define R9A07G054_PD_IA55 2
-#define R9A07G054_PD_MHU 3
-#define R9A07G054_PD_CORESIGHT 4
-#define R9A07G054_PD_SYC 5
-#define R9A07G054_PD_DMAC 6
-#define R9A07G054_PD_GTM0 7
-#define R9A07G054_PD_GTM1 8
-#define R9A07G054_PD_GTM2 9
-#define R9A07G054_PD_MTU 10
-#define R9A07G054_PD_POE3 11
-#define R9A07G054_PD_GPT 12
-#define R9A07G054_PD_POEGA 13
-#define R9A07G054_PD_POEGB 14
-#define R9A07G054_PD_POEGC 15
-#define R9A07G054_PD_POEGD 16
-#define R9A07G054_PD_WDT0 17
-#define R9A07G054_PD_WDT1 18
-#define R9A07G054_PD_SPI 19
-#define R9A07G054_PD_SDHI0 20
-#define R9A07G054_PD_SDHI1 21
-#define R9A07G054_PD_3DGE 22
-#define R9A07G054_PD_ISU 23
-#define R9A07G054_PD_VCPL4 24
-#define R9A07G054_PD_CRU 25
-#define R9A07G054_PD_MIPI_DSI 26
-#define R9A07G054_PD_LCDC 27
-#define R9A07G054_PD_SSI0 28
-#define R9A07G054_PD_SSI1 29
-#define R9A07G054_PD_SSI2 30
-#define R9A07G054_PD_SSI3 31
-#define R9A07G054_PD_SRC 32
-#define R9A07G054_PD_USB0 33
-#define R9A07G054_PD_USB1 34
-#define R9A07G054_PD_USB_PHY 35
-#define R9A07G054_PD_ETHER0 36
-#define R9A07G054_PD_ETHER1 37
-#define R9A07G054_PD_I2C0 38
-#define R9A07G054_PD_I2C1 39
-#define R9A07G054_PD_I2C2 40
-#define R9A07G054_PD_I2C3 41
-#define R9A07G054_PD_SCIF0 42
-#define R9A07G054_PD_SCIF1 43
-#define R9A07G054_PD_SCIF2 44
-#define R9A07G054_PD_SCIF3 45
-#define R9A07G054_PD_SCIF4 46
-#define R9A07G054_PD_SCI0 47
-#define R9A07G054_PD_SCI1 48
-#define R9A07G054_PD_IRDA 49
-#define R9A07G054_PD_RSPI0 50
-#define R9A07G054_PD_RSPI1 51
-#define R9A07G054_PD_RSPI2 52
-#define R9A07G054_PD_CANFD 53
-#define R9A07G054_PD_ADC 54
-#define R9A07G054_PD_TSU 55
-
#endif /* __DT_BINDINGS_CLOCK_R9A07G054_CPG_H__ */
diff --git a/include/dt-bindings/clock/r9a08g045-cpg.h b/include/dt-bindings/clock/r9a08g045-cpg.h
index 311521fe4b59..410725b778a8 100644
--- a/include/dt-bindings/clock/r9a08g045-cpg.h
+++ b/include/dt-bindings/clock/r9a08g045-cpg.h
@@ -239,75 +239,4 @@
#define R9A08G045_I3C_PRESETN 92
#define R9A08G045_VBAT_BRESETN 93
-/* Power domain IDs. */
-#define R9A08G045_PD_ALWAYS_ON 0
-#define R9A08G045_PD_GIC 1
-#define R9A08G045_PD_IA55 2
-#define R9A08G045_PD_MHU 3
-#define R9A08G045_PD_CORESIGHT 4
-#define R9A08G045_PD_SYC 5
-#define R9A08G045_PD_DMAC 6
-#define R9A08G045_PD_GTM0 7
-#define R9A08G045_PD_GTM1 8
-#define R9A08G045_PD_GTM2 9
-#define R9A08G045_PD_GTM3 10
-#define R9A08G045_PD_GTM4 11
-#define R9A08G045_PD_GTM5 12
-#define R9A08G045_PD_GTM6 13
-#define R9A08G045_PD_GTM7 14
-#define R9A08G045_PD_MTU 15
-#define R9A08G045_PD_POE3 16
-#define R9A08G045_PD_GPT 17
-#define R9A08G045_PD_POEGA 18
-#define R9A08G045_PD_POEGB 19
-#define R9A08G045_PD_POEGC 20
-#define R9A08G045_PD_POEGD 21
-#define R9A08G045_PD_WDT0 22
-#define R9A08G045_PD_XSPI 23
-#define R9A08G045_PD_SDHI0 24
-#define R9A08G045_PD_SDHI1 25
-#define R9A08G045_PD_SDHI2 26
-#define R9A08G045_PD_SSI0 27
-#define R9A08G045_PD_SSI1 28
-#define R9A08G045_PD_SSI2 29
-#define R9A08G045_PD_SSI3 30
-#define R9A08G045_PD_SRC 31
-#define R9A08G045_PD_USB0 32
-#define R9A08G045_PD_USB1 33
-#define R9A08G045_PD_USB_PHY 34
-#define R9A08G045_PD_ETHER0 35
-#define R9A08G045_PD_ETHER1 36
-#define R9A08G045_PD_I2C0 37
-#define R9A08G045_PD_I2C1 38
-#define R9A08G045_PD_I2C2 39
-#define R9A08G045_PD_I2C3 40
-#define R9A08G045_PD_SCIF0 41
-#define R9A08G045_PD_SCIF1 42
-#define R9A08G045_PD_SCIF2 43
-#define R9A08G045_PD_SCIF3 44
-#define R9A08G045_PD_SCIF4 45
-#define R9A08G045_PD_SCIF5 46
-#define R9A08G045_PD_SCI0 47
-#define R9A08G045_PD_SCI1 48
-#define R9A08G045_PD_IRDA 49
-#define R9A08G045_PD_RSPI0 50
-#define R9A08G045_PD_RSPI1 51
-#define R9A08G045_PD_RSPI2 52
-#define R9A08G045_PD_RSPI3 53
-#define R9A08G045_PD_RSPI4 54
-#define R9A08G045_PD_CANFD 55
-#define R9A08G045_PD_ADC 56
-#define R9A08G045_PD_TSU 57
-#define R9A08G045_PD_OCTA 58
-#define R9A08G045_PD_PDM 59
-#define R9A08G045_PD_PCI 60
-#define R9A08G045_PD_SPDIF 61
-#define R9A08G045_PD_I3C 62
-#define R9A08G045_PD_VBAT 63
-
-#define R9A08G045_PD_DDR 64
-#define R9A08G045_PD_TZCDDR 65
-#define R9A08G045_PD_OTFDE_DDR 66
-#define R9A08G045_PD_RTC 67
-
#endif /* __DT_BINDINGS_CLOCK_R9A08G045_CPG_H__ */
diff --git a/include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h b/include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h
new file mode 100644
index 000000000000..7ecc4f0b235a
--- /dev/null
+++ b/include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_RENESAS_R9A09G077_CPG_H__
+#define __DT_BINDINGS_CLOCK_RENESAS_R9A09G077_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* R9A09G077 CPG Core Clocks */
+#define R9A09G077_CLK_CA55C0 0
+#define R9A09G077_CLK_CA55C1 1
+#define R9A09G077_CLK_CA55C2 2
+#define R9A09G077_CLK_CA55C3 3
+#define R9A09G077_CLK_CA55S 4
+#define R9A09G077_CLK_CR52_CPU0 5
+#define R9A09G077_CLK_CR52_CPU1 6
+#define R9A09G077_CLK_CKIO 7
+#define R9A09G077_CLK_PCLKAH 8
+#define R9A09G077_CLK_PCLKAM 9
+#define R9A09G077_CLK_PCLKAL 10
+#define R9A09G077_CLK_PCLKGPTL 11
+#define R9A09G077_CLK_PCLKH 12
+#define R9A09G077_CLK_PCLKM 13
+#define R9A09G077_CLK_PCLKL 14
+#define R9A09G077_SDHI_CLKHS 15
+
+#endif /* __DT_BINDINGS_CLOCK_RENESAS_R9A09G077_CPG_H__ */
diff --git a/include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h b/include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h
new file mode 100644
index 000000000000..925e57703925
--- /dev/null
+++ b/include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_RENESAS_R9A09G087_CPG_H__
+#define __DT_BINDINGS_CLOCK_RENESAS_R9A09G087_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* R9A09G087 CPG Core Clocks */
+#define R9A09G087_CLK_CA55C0 0
+#define R9A09G087_CLK_CA55C1 1
+#define R9A09G087_CLK_CA55C2 2
+#define R9A09G087_CLK_CA55C3 3
+#define R9A09G087_CLK_CA55S 4
+#define R9A09G087_CLK_CR52_CPU0 5
+#define R9A09G087_CLK_CR52_CPU1 6
+#define R9A09G087_CLK_CKIO 7
+#define R9A09G087_CLK_PCLKAH 8
+#define R9A09G087_CLK_PCLKAM 9
+#define R9A09G087_CLK_PCLKAL 10
+#define R9A09G087_CLK_PCLKGPTL 11
+#define R9A09G087_CLK_PCLKH 12
+#define R9A09G087_CLK_PCLKM 13
+#define R9A09G087_CLK_PCLKL 14
+#define R9A09G087_SDHI_CLKHS 15
+
+#endif /* __DT_BINDINGS_CLOCK_RENESAS_R9A09G087_CPG_H__ */
diff --git a/include/dt-bindings/clock/samsung,exynosautov920.h b/include/dt-bindings/clock/samsung,exynosautov920.h
index 5e6896e9627f..93e6233d1358 100644
--- a/include/dt-bindings/clock/samsung,exynosautov920.h
+++ b/include/dt-bindings/clock/samsung,exynosautov920.h
@@ -286,4 +286,13 @@
#define CLK_MOUT_HSI1_USBDRD_USER 3
#define CLK_MOUT_HSI1_USBDRD 4
+/* CMU_HSI2 */
+#define FOUT_PLL_ETH 1
+#define CLK_MOUT_HSI2_NOC_UFS_USER 2
+#define CLK_MOUT_HSI2_UFS_EMBD_USER 3
+#define CLK_MOUT_HSI2_ETHERNET 4
+#define CLK_MOUT_HSI2_ETHERNET_USER 5
+#define CLK_DOUT_HSI2_ETHERNET 6
+#define CLK_DOUT_HSI2_ETHERNET_PTP 7
+
#endif /* _DT_BINDINGS_CLOCK_EXYNOSAUTOV920_H */
diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h
index d9b7bac30953..f15bcee7c928 100644
--- a/include/dt-bindings/power/qcom-rpmpd.h
+++ b/include/dt-bindings/power/qcom-rpmpd.h
@@ -240,6 +240,7 @@
#define RPMH_REGULATOR_LEVEL_TURBO_L2 432
#define RPMH_REGULATOR_LEVEL_TURBO_L3 448
#define RPMH_REGULATOR_LEVEL_TURBO_L4 452
+#define RPMH_REGULATOR_LEVEL_TURBO_L5 456
#define RPMH_REGULATOR_LEVEL_SUPER_TURBO 464
#define RPMH_REGULATOR_LEVEL_SUPER_TURBO_NO_CPR 480
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 4a34f7f0a864..404883c7af6e 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -38,6 +38,7 @@
enum vgic_type {
VGIC_V2, /* Good ol' GICv2 */
VGIC_V3, /* New fancy GICv3 */
+ VGIC_V5, /* Newer, fancier GICv5 */
};
/* same for all guests, as depending only on the _host's_ GIC model */
@@ -77,9 +78,12 @@ struct vgic_global {
/* Pseudo GICv3 from outer space */
bool no_hw_deactivation;
- /* GIC system register CPU interface */
+ /* GICv3 system register CPU interface */
struct static_key_false gicv3_cpuif;
+ /* GICv3 compat mode on a GICv5 host */
+ bool has_gcie_v3_compat;
+
u32 ich_vtr_el2;
};
@@ -264,6 +268,9 @@ struct vgic_dist {
/* distributor enabled */
bool enabled;
+ /* Supports SGIs without active state */
+ bool nassgicap;
+
/* Wants SGIs without active state */
bool nassgireq;
@@ -434,7 +441,7 @@ struct kvm_kernel_irq_routing_entry;
int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int irq,
struct kvm_kernel_irq_routing_entry *irq_entry);
-int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int host_irq);
+void kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int host_irq);
int vgic_v4_load(struct kvm_vcpu *vcpu);
void vgic_v4_commit(struct kvm_vcpu *vcpu);
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index fc7f5e039074..1c5bb1e887cd 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1503,7 +1503,7 @@ int acpi_parse_spcr(bool enable_earlycon, bool enable_console);
#else
static inline int acpi_parse_spcr(bool enable_earlycon, bool enable_console)
{
- return 0;
+ return -ENODEV;
}
#endif
diff --git a/include/linux/adi-axi-common.h b/include/linux/adi-axi-common.h
new file mode 100644
index 000000000000..f64f4ad4beda
--- /dev/null
+++ b/include/linux/adi-axi-common.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Analog Devices AXI common registers & definitions
+ *
+ * Copyright 2019 Analog Devices Inc.
+ *
+ * https://wiki.analog.com/resources/fpga/docs/axi_ip
+ * https://wiki.analog.com/resources/fpga/docs/hdl/regmap
+ */
+
+#ifndef ADI_AXI_COMMON_H_
+#define ADI_AXI_COMMON_H_
+
+#define ADI_AXI_REG_VERSION 0x0000
+#define ADI_AXI_REG_FPGA_INFO 0x001C
+
+#define ADI_AXI_PCORE_VER(major, minor, patch) \
+ (((major) << 16) | ((minor) << 8) | (patch))
+
+#define ADI_AXI_PCORE_VER_MAJOR(version) (((version) >> 16) & 0xff)
+#define ADI_AXI_PCORE_VER_MINOR(version) (((version) >> 8) & 0xff)
+#define ADI_AXI_PCORE_VER_PATCH(version) ((version) & 0xff)
+
+#define ADI_AXI_INFO_FPGA_TECH(info) (((info) >> 24) & 0xff)
+#define ADI_AXI_INFO_FPGA_FAMILY(info) (((info) >> 16) & 0xff)
+#define ADI_AXI_INFO_FPGA_SPEED_GRADE(info) (((info) >> 8) & 0xff)
+
+enum adi_axi_fpga_technology {
+ ADI_AXI_FPGA_TECH_UNKNOWN = 0,
+ ADI_AXI_FPGA_TECH_SERIES7,
+ ADI_AXI_FPGA_TECH_ULTRASCALE,
+ ADI_AXI_FPGA_TECH_ULTRASCALE_PLUS,
+};
+
+enum adi_axi_fpga_family {
+ ADI_AXI_FPGA_FAMILY_UNKNOWN = 0,
+ ADI_AXI_FPGA_FAMILY_ARTIX,
+ ADI_AXI_FPGA_FAMILY_KINTEX,
+ ADI_AXI_FPGA_FAMILY_VIRTEX,
+ ADI_AXI_FPGA_FAMILY_ZYNQ,
+};
+
+enum adi_axi_fpga_speed_grade {
+ ADI_AXI_FPGA_SPEED_UNKNOWN = 0,
+ ADI_AXI_FPGA_SPEED_1 = 10,
+ ADI_AXI_FPGA_SPEED_1L = 11,
+ ADI_AXI_FPGA_SPEED_1H = 12,
+ ADI_AXI_FPGA_SPEED_1HV = 13,
+ ADI_AXI_FPGA_SPEED_1LV = 14,
+ ADI_AXI_FPGA_SPEED_2 = 20,
+ ADI_AXI_FPGA_SPEED_2L = 21,
+ ADI_AXI_FPGA_SPEED_2LV = 22,
+ ADI_AXI_FPGA_SPEED_3 = 30,
+};
+
+#endif /* ADI_AXI_COMMON_H_ */
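
A minimal userspace sketch of how the version macros defined above decode the word read back from ADI_AXI_REG_VERSION; the macros are copied from the header, while the 0x00020100 sample value is purely hypothetical.

#include <stdio.h>

#define ADI_AXI_PCORE_VER_MAJOR(version)	(((version) >> 16) & 0xff)
#define ADI_AXI_PCORE_VER_MINOR(version)	(((version) >> 8) & 0xff)
#define ADI_AXI_PCORE_VER_PATCH(version)	((version) & 0xff)

int main(void)
{
	unsigned int version = 0x00020100;	/* hypothetical readout of ADI_AXI_REG_VERSION */

	/* 0x00020100 decodes as major 2, minor 1, patch 0 */
	printf("AXI pcore v%u.%u.%u\n",
	       ADI_AXI_PCORE_VER_MAJOR(version),
	       ADI_AXI_PCORE_VER_MINOR(version),
	       ADI_AXI_PCORE_VER_PATCH(version));
	return 0;
}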
diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
index 8f7931eb7d16..9ef2633e2c08 100644
--- a/include/linux/alloc_tag.h
+++ b/include/linux/alloc_tag.h
@@ -88,7 +88,7 @@ static inline struct alloc_tag *ct_to_alloc_tag(struct codetag *ct)
return container_of(ct, struct alloc_tag, ct);
}
-#ifdef ARCH_NEEDS_WEAK_PER_CPU
+#if defined(CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU) && defined(MODULE)
/*
* When percpu variables are required to be defined as weak, static percpu
* variables can't be used inside a function (see comments for DECLARE_PER_CPU_SECTION).
@@ -102,7 +102,7 @@ DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
.ct = CODE_TAG_INIT, \
.counters = &_shared_alloc_tag };
-#else /* ARCH_NEEDS_WEAK_PER_CPU */
+#else /* CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU && MODULE */
#ifdef MODULE
@@ -123,7 +123,7 @@ DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
#endif /* MODULE */
-#endif /* ARCH_NEEDS_WEAK_PER_CPU */
+#endif /* CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU && MODULE */
DECLARE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
mem_alloc_profiling_key);
diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h
index 062fbd4c9b77..8cced632ecd0 100644
--- a/include/linux/amd-iommu.h
+++ b/include/linux/amd-iommu.h
@@ -12,20 +12,6 @@
struct amd_iommu;
-/*
- * This is mainly used to communicate information back-and-forth
- * between SVM and IOMMU for setting up and tearing down posted
- * interrupt
- */
-struct amd_iommu_pi_data {
- u32 ga_tag;
- u32 prev_ga_tag;
- u64 base;
- bool is_guest_mode;
- struct vcpu_data *vcpu_data;
- void *ir_data;
-};
-
#ifdef CONFIG_AMD_IOMMU
struct task_struct;
@@ -44,10 +30,8 @@ static inline void amd_iommu_detect(void) { }
/* IOMMU AVIC Function */
extern int amd_iommu_register_ga_log_notifier(int (*notifier)(u32));
-extern int
-amd_iommu_update_ga(int cpu, bool is_run, void *data);
-
-extern int amd_iommu_activate_guest_mode(void *data);
+extern int amd_iommu_update_ga(void *data, int cpu, bool ga_log_intr);
+extern int amd_iommu_activate_guest_mode(void *data, int cpu, bool ga_log_intr);
extern int amd_iommu_deactivate_guest_mode(void *data);
#else /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */
@@ -58,13 +42,12 @@ amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
return 0;
}
-static inline int
-amd_iommu_update_ga(int cpu, bool is_run, void *data)
+static inline int amd_iommu_update_ga(void *data, int cpu, bool ga_log_intr)
{
return 0;
}
-static inline int amd_iommu_activate_guest_mode(void *data)
+static inline int amd_iommu_activate_guest_mode(void *data, int cpu, bool ga_log_intr)
{
return 0;
}
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index cf0afa60e4a7..5be1881abbb6 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -132,8 +132,8 @@ enum virtchnl_ops {
VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP = VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
- VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
- VIRTCHNL_OP_SET_RSS_HENA = 26,
+ VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS = 25,
+ VIRTCHNL_OP_SET_RSS_HASHCFG = 26,
VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
VIRTCHNL_OP_REQUEST_QUEUES = 29,
@@ -974,18 +974,19 @@ struct virtchnl_rss_lut {
VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rss_lut);
#define virtchnl_rss_lut_LEGACY_SIZEOF 6
-/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
- * VIRTCHNL_OP_SET_RSS_HENA
- * VF sends these messages to get and set the hash filter enable bits for RSS.
+/* VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS
+ * VIRTCHNL_OP_SET_RSS_HASHCFG
+ * VF sends these messages to get and set the hash filter configuration for RSS.
* By default, the PF sets these to all possible traffic types that the
* hardware supports. The VF can query this value if it wants to change the
* traffic types that are hashed by the hardware.
*/
-struct virtchnl_rss_hena {
- u64 hena;
+struct virtchnl_rss_hashcfg {
+ /* Bits defined by enum libie_filter_pctype */
+ u64 hashcfg;
};
-VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hashcfg);
/* Type of RSS algorithm */
enum virtchnl_rss_algorithm {
@@ -1779,10 +1780,10 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
valid_len = sizeof(struct virtchnl_rss_hfunc);
break;
- case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS:
break;
- case VIRTCHNL_OP_SET_RSS_HENA:
- valid_len = sizeof(struct virtchnl_rss_hena);
+ case VIRTCHNL_OP_SET_RSS_HASHCFG:
+ valid_len = sizeof(struct virtchnl_rss_hashcfg);
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
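
A minimal sketch of the 64-bit hashcfg word carried by struct virtchnl_rss_hashcfg: each set bit enables hashing for one traffic type. The bit position used here is hypothetical; real code takes the bit numbers from enum libie_filter_pctype.

#include <stdint.h>
#include <stdio.h>

#define DEMO_PCTYPE_NONF_IPV4_TCP	31	/* hypothetical pctype bit position */

int main(void)
{
	uint64_t hashcfg = 0;	/* same layout as struct virtchnl_rss_hashcfg::hashcfg */

	/* enable RSS hashing for one traffic type */
	hashcfg |= 1ULL << DEMO_PCTYPE_NONF_IPV4_TCP;

	printf("hashcfg = 0x%016llx\n", (unsigned long long)hashcfg);
	return 0;
}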
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
index 5ca2d5699620..7cfe48769239 100644
--- a/include/linux/balloon_compaction.h
+++ b/include/linux/balloon_compaction.h
@@ -4,12 +4,13 @@
*
* Common interface definitions for making balloon pages movable by compaction.
*
- * Balloon page migration makes use of the general non-lru movable page
+ * Balloon page migration makes use of the general "movable_ops page migration"
* feature.
*
* page->private is used to reference the responsible balloon device.
- * page->mapping is used in context of non-lru page migration to reference
- * the address space operations for page isolation/migration/compaction.
+ * That these pages have movable_ops, and which movable_ops apply,
+ * is derived from the page type (PageOffline()) combined with the
+ * PG_movable_ops flag (PageMovableOps()).
*
* As the page isolation scanning step a compaction thread does is a lockless
* procedure (from a page standpoint), it might bring some racy situations while
@@ -17,12 +18,10 @@
* and safely perform balloon's page compaction and migration we must, always,
* ensure following these simple rules:
*
- * i. when updating a balloon's page ->mapping element, strictly do it under
- * the following lock order, independently of the far superior
- * locking scheme (lru_lock, balloon_lock):
+ * i. Setting the PG_movable_ops flag and page->private with the following
+ * lock order
* +-page_lock(page);
* +--spin_lock_irq(&b_dev_info->pages_lock);
- * ... page->mapping updates here ...
*
* ii. isolation or dequeueing procedure must remove the page from balloon
* device page list under b_dev_info->pages_lock.
@@ -78,6 +77,15 @@ static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
#ifdef CONFIG_BALLOON_COMPACTION
extern const struct movable_operations balloon_mops;
+/*
+ * balloon_page_device - get the b_dev_info descriptor for the balloon device
+ * that enqueues the given page.
+ */
+static inline struct balloon_dev_info *balloon_page_device(struct page *page)
+{
+ return (struct balloon_dev_info *)page_private(page);
+}
+#endif /* CONFIG_BALLOON_COMPACTION */
/*
* balloon_page_insert - insert a page into the balloon's page list and make
@@ -92,68 +100,34 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon,
struct page *page)
{
__SetPageOffline(page);
- __SetPageMovable(page, &balloon_mops);
- set_page_private(page, (unsigned long)balloon);
+ if (IS_ENABLED(CONFIG_BALLOON_COMPACTION)) {
+ SetPageMovableOps(page);
+ set_page_private(page, (unsigned long)balloon);
+ }
list_add(&page->lru, &balloon->pages);
}
-/*
- * balloon_page_delete - delete a page from balloon's page list and clear
- * the page->private assignement accordingly.
- * @page : page to be released from balloon's page list
- *
- * Caller must ensure the page is locked and the spin_lock protecting balloon
- * pages list is held before deleting a page from the balloon device.
- */
-static inline void balloon_page_delete(struct page *page)
+static inline gfp_t balloon_mapping_gfp_mask(void)
{
- __ClearPageOffline(page);
- __ClearPageMovable(page);
- set_page_private(page, 0);
- /*
- * No touch page.lru field once @page has been isolated
- * because VM is using the field.
- */
- if (!PageIsolated(page))
- list_del(&page->lru);
+ if (IS_ENABLED(CONFIG_BALLOON_COMPACTION))
+ return GFP_HIGHUSER_MOVABLE;
+ return GFP_HIGHUSER;
}
/*
- * balloon_page_device - get the b_dev_info descriptor for the balloon device
- * that enqueues the given page.
+ * balloon_page_finalize - prepare a balloon page that was removed from the
+ * balloon list for release to the page allocator
+ * @page: page to be released to the page allocator
+ *
+ * Caller must ensure that the page is locked.
*/
-static inline struct balloon_dev_info *balloon_page_device(struct page *page)
+static inline void balloon_page_finalize(struct page *page)
{
- return (struct balloon_dev_info *)page_private(page);
+ if (IS_ENABLED(CONFIG_BALLOON_COMPACTION))
+ set_page_private(page, 0);
+ /* PageOffline is sticky until the page is freed to the buddy. */
}
-static inline gfp_t balloon_mapping_gfp_mask(void)
-{
- return GFP_HIGHUSER_MOVABLE;
-}
-
-#else /* !CONFIG_BALLOON_COMPACTION */
-
-static inline void balloon_page_insert(struct balloon_dev_info *balloon,
- struct page *page)
-{
- __SetPageOffline(page);
- list_add(&page->lru, &balloon->pages);
-}
-
-static inline void balloon_page_delete(struct page *page)
-{
- __ClearPageOffline(page);
- list_del(&page->lru);
-}
-
-static inline gfp_t balloon_mapping_gfp_mask(void)
-{
- return GFP_HIGHUSER;
-}
-
-#endif /* CONFIG_BALLOON_COMPACTION */
-
/*
* balloon_page_push - insert a page into a page list.
* @head : pointer to list
diff --git a/include/linux/bcm47xx_nvram.h b/include/linux/bcm47xx_nvram.h
index 7615f8d7b1ed..e4b6ce953ddb 100644
--- a/include/linux/bcm47xx_nvram.h
+++ b/include/linux/bcm47xx_nvram.h
@@ -7,7 +7,6 @@
#include <linux/errno.h>
#include <linux/types.h>
-#include <linux/kernel.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_BCM47XX_NVRAM
diff --git a/include/linux/bcm47xx_sprom.h b/include/linux/bcm47xx_sprom.h
index f8254fd53e15..40a7da3ef50e 100644
--- a/include/linux/bcm47xx_sprom.h
+++ b/include/linux/bcm47xx_sprom.h
@@ -5,8 +5,8 @@
#ifndef __BCM47XX_SPROM_H
#define __BCM47XX_SPROM_H
+#include <linux/errno.h>
#include <linux/types.h>
-#include <linux/kernel.h>
#include <linux/vmalloc.h>
struct ssb_sprom;
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index 6d9a53db54b6..5355f8f806a9 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -189,14 +189,14 @@ static __always_inline u64 field_mask(u64 field)
}
#define field_max(field) ((typeof(field))field_mask(field))
#define ____MAKE_OP(type,base,to,from) \
-static __always_inline __##type type##_encode_bits(base v, base field) \
+static __always_inline __##type __must_check type##_encode_bits(base v, base field) \
{ \
if (__builtin_constant_p(v) && (v & ~field_mask(field))) \
__field_overflow(); \
return to((v & field_mask(field)) * field_multiplier(field)); \
} \
-static __always_inline __##type type##_replace_bits(__##type old, \
- base val, base field) \
+static __always_inline __##type __must_check type##_replace_bits(__##type old, \
+ base val, base field) \
{ \
return (old & ~to(field)) | type##_encode_bits(val, field); \
} \
@@ -205,7 +205,7 @@ static __always_inline void type##p_replace_bits(__##type *p, \
{ \
*p = (*p & ~to(field)) | type##_encode_bits(val, field); \
} \
-static __always_inline base type##_get_bits(__##type v, base field) \
+static __always_inline base __must_check type##_get_bits(__##type v, base field) \
{ \
return (from(v) & field)/field_multiplier(field); \
}
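
A minimal userspace sketch of the encode/get semantics that ____MAKE_OP() generates (e.g. u32_encode_bits()/u32_get_bits()): the mask argument positions the value, and its lowest set bit plays the role of field_multiplier(). The demo_ helpers and DEMO_ macros below are illustrative stand-ins, not the kernel implementation; the kernel versions also validate the mask at build time and, with the __must_check annotation added here, warn when a caller discards the result.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_FIELD		0x0000ff00u		/* example field occupying bits 15..8 */
#define DEMO_FIELD_LSB(m)	((m) & (~(m) + 1u))	/* lowest set bit of the mask */

static uint32_t demo_u32_encode_bits(uint32_t v, uint32_t field)
{
	return (v * DEMO_FIELD_LSB(field)) & field;	/* shift v into the field */
}

static uint32_t demo_u32_get_bits(uint32_t reg, uint32_t field)
{
	return (reg & field) / DEMO_FIELD_LSB(field);	/* pull v back out of the field */
}

int main(void)
{
	uint32_t reg = demo_u32_encode_bits(0xab, DEMO_FIELD);

	/* prints reg = 0x0000ab00, value = 0xab */
	printf("reg = 0x%08" PRIx32 ", value = 0x%" PRIx32 "\n",
	       reg, demo_u32_get_bits(reg, DEMO_FIELD));
	return 0;
}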
diff --git a/include/linux/bits.h b/include/linux/bits.h
index 7ad056219115..a40cc861b3a7 100644
--- a/include/linux/bits.h
+++ b/include/linux/bits.h
@@ -2,10 +2,8 @@
#ifndef __LINUX_BITS_H
#define __LINUX_BITS_H
-#include <linux/const.h>
#include <vdso/bits.h>
#include <uapi/linux/bits.h>
-#include <asm/bitsperlong.h>
#define BIT_MASK(nr) (UL(1) << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
@@ -50,10 +48,14 @@
(type_max(t) << (l) & \
type_max(t) >> (BITS_PER_TYPE(t) - 1 - (h)))))
+#define GENMASK(h, l) GENMASK_TYPE(unsigned long, h, l)
+#define GENMASK_ULL(h, l) GENMASK_TYPE(unsigned long long, h, l)
+
#define GENMASK_U8(h, l) GENMASK_TYPE(u8, h, l)
#define GENMASK_U16(h, l) GENMASK_TYPE(u16, h, l)
#define GENMASK_U32(h, l) GENMASK_TYPE(u32, h, l)
#define GENMASK_U64(h, l) GENMASK_TYPE(u64, h, l)
+#define GENMASK_U128(h, l) GENMASK_TYPE(u128, h, l)
/*
* Fixed-type variants of BIT(), with additional checks like GENMASK_TYPE(). The
@@ -79,28 +81,9 @@
* BUILD_BUG_ON_ZERO is not available in h files included from asm files,
* disable the input check if that is the case.
*/
-#define GENMASK_INPUT_CHECK(h, l) 0
+#define GENMASK(h, l) __GENMASK(h, l)
+#define GENMASK_ULL(h, l) __GENMASK_ULL(h, l)
#endif /* !defined(__ASSEMBLY__) */
-#define GENMASK(h, l) \
- (GENMASK_INPUT_CHECK(h, l) + __GENMASK(h, l))
-#define GENMASK_ULL(h, l) \
- (GENMASK_INPUT_CHECK(h, l) + __GENMASK_ULL(h, l))
-
-#if !defined(__ASSEMBLY__)
-/*
- * Missing asm support
- *
- * __GENMASK_U128() depends on _BIT128() which would not work
- * in the asm code, as it shifts an 'unsigned __int128' data
- * type instead of direct representation of 128 bit constants
- * such as long and unsigned long. The fundamental problem is
- * that a 128 bit constant will get silently truncated by the
- * gcc compiler.
- */
-#define GENMASK_U128(h, l) \
- (GENMASK_INPUT_CHECK(h, l) + __GENMASK_U128(h, l))
-#endif
-
#endif /* __LINUX_BITS_H */
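
For reference, a minimal userspace illustration of the value GENMASK(h, l) evaluates to, namely a mask with bits h..l set; the DEMO_ macros are illustrative only, while the kernel macros additionally validate the arguments at build time and now offer the fixed-width GENMASK_U8()..GENMASK_U128() variants via GENMASK_TYPE().

#include <stdio.h>

#define DEMO_BITS_PER_LONG	(8 * sizeof(unsigned long))
#define DEMO_GENMASK(h, l) \
	(((~0UL) - (1UL << (l)) + 1) & (~0UL >> (DEMO_BITS_PER_LONG - 1 - (h))))

int main(void)
{
	printf("GENMASK(7, 4)  = 0x%lx\n", DEMO_GENMASK(7, 4));	/* 0xf0 */
	printf("GENMASK(31, 0) = 0x%lx\n", DEMO_GENMASK(31, 0));	/* 0xffffffff */
	return 0;
}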
diff --git a/include/linux/bnxt/hsi.h b/include/linux/bnxt/hsi.h
new file mode 100644
index 000000000000..549231703bce
--- /dev/null
+++ b/include/linux/bnxt/hsi.h
@@ -0,0 +1,10914 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2014-2018 Broadcom Limited
+ * Copyright (c) 2018-2025 Broadcom Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * DO NOT MODIFY!!! This file is automatically generated.
+ */
+
+#ifndef _BNXT_HSI_H_
+#define _BNXT_HSI_H_
+
+/* hwrm_cmd_hdr (size:128b/16B) */
+struct hwrm_cmd_hdr {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_resp_hdr (size:64b/8B) */
+struct hwrm_resp_hdr {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+};
+
+#define CMD_DISCR_TLV_ENCAP 0x8000UL
+#define CMD_DISCR_LAST CMD_DISCR_TLV_ENCAP
+
+
+#define TLV_TYPE_HWRM_REQUEST 0x1UL
+#define TLV_TYPE_HWRM_RESPONSE 0x2UL
+#define TLV_TYPE_ROCE_SP_COMMAND 0x3UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN1 0x4UL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 0x5UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN2 0x6UL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN2 0x7UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN1_EXT 0x8UL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN1_EXT 0x9UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN2_EXT 0xaUL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN2_EXT 0xbUL
+#define TLV_TYPE_ENGINE_CKV_ALIAS_ECC_PUBLIC_KEY 0x8001UL
+#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL
+#define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL
+#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT 0x8005UL
+#define TLV_TYPE_ENGINE_CKV_HOST_ALGORITHMS 0x8006UL
+#define TLV_TYPE_ENGINE_CKV_HOST_ECC_PUBLIC_KEY 0x8007UL
+#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE 0x8008UL
+#define TLV_TYPE_ENGINE_CKV_FW_ECC_PUBLIC_KEY 0x8009UL
+#define TLV_TYPE_ENGINE_CKV_FW_ALGORITHMS 0x800aUL
+#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_FW_ALGORITHMS
+
+
+/* tlv (size:64b/8B) */
+struct tlv {
+ __le16 cmd_discr;
+ u8 reserved_8b;
+ u8 flags;
+ #define TLV_FLAGS_MORE 0x1UL
+ #define TLV_FLAGS_MORE_LAST 0x0UL
+ #define TLV_FLAGS_MORE_NOT_LAST 0x1UL
+ #define TLV_FLAGS_REQUIRED 0x2UL
+ #define TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
+ #define TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
+ #define TLV_FLAGS_REQUIRED_LAST TLV_FLAGS_REQUIRED_YES
+ __le16 tlv_type;
+ __le16 length;
+};
+
+/* input (size:128b/16B) */
+struct input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* output (size:64b/8B) */
+struct output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+};
+
+/* hwrm_short_input (size:128b/16B) */
+struct hwrm_short_input {
+ __le16 req_type;
+ __le16 signature;
+ #define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL
+ #define SHORT_REQ_SIGNATURE_LAST SHORT_REQ_SIGNATURE_SHORT_CMD
+ __le16 target_id;
+ #define SHORT_REQ_TARGET_ID_DEFAULT 0x0UL
+ #define SHORT_REQ_TARGET_ID_TOOLS 0xfffdUL
+ #define SHORT_REQ_TARGET_ID_LAST SHORT_REQ_TARGET_ID_TOOLS
+ __le16 size;
+ __le64 req_addr;
+};
+
+/* cmd_nums (size:64b/8B) */
+struct cmd_nums {
+ __le16 req_type;
+ #define HWRM_VER_GET 0x0UL
+ #define HWRM_FUNC_ECHO_RESPONSE 0xbUL
+ #define HWRM_ERROR_RECOVERY_QCFG 0xcUL
+ #define HWRM_FUNC_DRV_IF_CHANGE 0xdUL
+ #define HWRM_FUNC_BUF_UNRGTR 0xeUL
+ #define HWRM_FUNC_VF_CFG 0xfUL
+ #define HWRM_RESERVED1 0x10UL
+ #define HWRM_FUNC_RESET 0x11UL
+ #define HWRM_FUNC_GETFID 0x12UL
+ #define HWRM_FUNC_VF_ALLOC 0x13UL
+ #define HWRM_FUNC_VF_FREE 0x14UL
+ #define HWRM_FUNC_QCAPS 0x15UL
+ #define HWRM_FUNC_QCFG 0x16UL
+ #define HWRM_FUNC_CFG 0x17UL
+ #define HWRM_FUNC_QSTATS 0x18UL
+ #define HWRM_FUNC_CLR_STATS 0x19UL
+ #define HWRM_FUNC_DRV_UNRGTR 0x1aUL
+ #define HWRM_FUNC_VF_RESC_FREE 0x1bUL
+ #define HWRM_FUNC_VF_VNIC_IDS_QUERY 0x1cUL
+ #define HWRM_FUNC_DRV_RGTR 0x1dUL
+ #define HWRM_FUNC_DRV_QVER 0x1eUL
+ #define HWRM_FUNC_BUF_RGTR 0x1fUL
+ #define HWRM_PORT_PHY_CFG 0x20UL
+ #define HWRM_PORT_MAC_CFG 0x21UL
+ #define HWRM_PORT_TS_QUERY 0x22UL
+ #define HWRM_PORT_QSTATS 0x23UL
+ #define HWRM_PORT_LPBK_QSTATS 0x24UL
+ #define HWRM_PORT_CLR_STATS 0x25UL
+ #define HWRM_PORT_LPBK_CLR_STATS 0x26UL
+ #define HWRM_PORT_PHY_QCFG 0x27UL
+ #define HWRM_PORT_MAC_QCFG 0x28UL
+ #define HWRM_PORT_MAC_PTP_QCFG 0x29UL
+ #define HWRM_PORT_PHY_QCAPS 0x2aUL
+ #define HWRM_PORT_PHY_I2C_WRITE 0x2bUL
+ #define HWRM_PORT_PHY_I2C_READ 0x2cUL
+ #define HWRM_PORT_LED_CFG 0x2dUL
+ #define HWRM_PORT_LED_QCFG 0x2eUL
+ #define HWRM_PORT_LED_QCAPS 0x2fUL
+ #define HWRM_QUEUE_QPORTCFG 0x30UL
+ #define HWRM_QUEUE_QCFG 0x31UL
+ #define HWRM_QUEUE_CFG 0x32UL
+ #define HWRM_FUNC_VLAN_CFG 0x33UL
+ #define HWRM_FUNC_VLAN_QCFG 0x34UL
+ #define HWRM_QUEUE_PFCENABLE_QCFG 0x35UL
+ #define HWRM_QUEUE_PFCENABLE_CFG 0x36UL
+ #define HWRM_QUEUE_PRI2COS_QCFG 0x37UL
+ #define HWRM_QUEUE_PRI2COS_CFG 0x38UL
+ #define HWRM_QUEUE_COS2BW_QCFG 0x39UL
+ #define HWRM_QUEUE_COS2BW_CFG 0x3aUL
+ #define HWRM_QUEUE_DSCP_QCAPS 0x3bUL
+ #define HWRM_QUEUE_DSCP2PRI_QCFG 0x3cUL
+ #define HWRM_QUEUE_DSCP2PRI_CFG 0x3dUL
+ #define HWRM_VNIC_ALLOC 0x40UL
+ #define HWRM_VNIC_FREE 0x41UL
+ #define HWRM_VNIC_CFG 0x42UL
+ #define HWRM_VNIC_QCFG 0x43UL
+ #define HWRM_VNIC_TPA_CFG 0x44UL
+ #define HWRM_VNIC_TPA_QCFG 0x45UL
+ #define HWRM_VNIC_RSS_CFG 0x46UL
+ #define HWRM_VNIC_RSS_QCFG 0x47UL
+ #define HWRM_VNIC_PLCMODES_CFG 0x48UL
+ #define HWRM_VNIC_PLCMODES_QCFG 0x49UL
+ #define HWRM_VNIC_QCAPS 0x4aUL
+ #define HWRM_VNIC_UPDATE 0x4bUL
+ #define HWRM_RING_ALLOC 0x50UL
+ #define HWRM_RING_FREE 0x51UL
+ #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL
+ #define HWRM_RING_AGGINT_QCAPS 0x54UL
+ #define HWRM_RING_SCHQ_ALLOC 0x55UL
+ #define HWRM_RING_SCHQ_CFG 0x56UL
+ #define HWRM_RING_SCHQ_FREE 0x57UL
+ #define HWRM_RING_RESET 0x5eUL
+ #define HWRM_RING_GRP_ALLOC 0x60UL
+ #define HWRM_RING_GRP_FREE 0x61UL
+ #define HWRM_RING_CFG 0x62UL
+ #define HWRM_RING_QCFG 0x63UL
+ #define HWRM_RESERVED5 0x64UL
+ #define HWRM_RESERVED6 0x65UL
+ #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL
+ #define HWRM_VNIC_RSS_COS_LB_CTX_FREE 0x71UL
+ #define HWRM_QUEUE_MPLS_QCAPS 0x80UL
+ #define HWRM_QUEUE_MPLSTC2PRI_QCFG 0x81UL
+ #define HWRM_QUEUE_MPLSTC2PRI_CFG 0x82UL
+ #define HWRM_QUEUE_VLANPRI_QCAPS 0x83UL
+ #define HWRM_QUEUE_VLANPRI2PRI_QCFG 0x84UL
+ #define HWRM_QUEUE_VLANPRI2PRI_CFG 0x85UL
+ #define HWRM_QUEUE_GLOBAL_CFG 0x86UL
+ #define HWRM_QUEUE_GLOBAL_QCFG 0x87UL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG 0x88UL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG 0x89UL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG 0x8aUL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG 0x8bUL
+ #define HWRM_QUEUE_QCAPS 0x8cUL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_QCFG 0x8dUL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG 0x8eUL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_QCFG 0x8fUL
+ #define HWRM_CFA_L2_FILTER_ALLOC 0x90UL
+ #define HWRM_CFA_L2_FILTER_FREE 0x91UL
+ #define HWRM_CFA_L2_FILTER_CFG 0x92UL
+ #define HWRM_CFA_L2_SET_RX_MASK 0x93UL
+ #define HWRM_CFA_VLAN_ANTISPOOF_CFG 0x94UL
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC 0x95UL
+ #define HWRM_CFA_TUNNEL_FILTER_FREE 0x96UL
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC 0x97UL
+ #define HWRM_CFA_ENCAP_RECORD_FREE 0x98UL
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC 0x99UL
+ #define HWRM_CFA_NTUPLE_FILTER_FREE 0x9aUL
+ #define HWRM_CFA_NTUPLE_FILTER_CFG 0x9bUL
+ #define HWRM_CFA_EM_FLOW_ALLOC 0x9cUL
+ #define HWRM_CFA_EM_FLOW_FREE 0x9dUL
+ #define HWRM_CFA_EM_FLOW_CFG 0x9eUL
+ #define HWRM_TUNNEL_DST_PORT_QUERY 0xa0UL
+ #define HWRM_TUNNEL_DST_PORT_ALLOC 0xa1UL
+ #define HWRM_TUNNEL_DST_PORT_FREE 0xa2UL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG 0xa3UL
+ #define HWRM_STAT_CTX_ENG_QUERY 0xafUL
+ #define HWRM_STAT_CTX_ALLOC 0xb0UL
+ #define HWRM_STAT_CTX_FREE 0xb1UL
+ #define HWRM_STAT_CTX_QUERY 0xb2UL
+ #define HWRM_STAT_CTX_CLR_STATS 0xb3UL
+ #define HWRM_PORT_QSTATS_EXT 0xb4UL
+ #define HWRM_PORT_PHY_MDIO_WRITE 0xb5UL
+ #define HWRM_PORT_PHY_MDIO_READ 0xb6UL
+ #define HWRM_PORT_PHY_MDIO_BUS_ACQUIRE 0xb7UL
+ #define HWRM_PORT_PHY_MDIO_BUS_RELEASE 0xb8UL
+ #define HWRM_PORT_QSTATS_EXT_PFC_WD 0xb9UL
+ #define HWRM_RESERVED7 0xbaUL
+ #define HWRM_PORT_TX_FIR_CFG 0xbbUL
+ #define HWRM_PORT_TX_FIR_QCFG 0xbcUL
+ #define HWRM_PORT_ECN_QSTATS 0xbdUL
+ #define HWRM_FW_LIVEPATCH_QUERY 0xbeUL
+ #define HWRM_FW_LIVEPATCH 0xbfUL
+ #define HWRM_FW_RESET 0xc0UL
+ #define HWRM_FW_QSTATUS 0xc1UL
+ #define HWRM_FW_HEALTH_CHECK 0xc2UL
+ #define HWRM_FW_SYNC 0xc3UL
+ #define HWRM_FW_STATE_QCAPS 0xc4UL
+ #define HWRM_FW_STATE_QUIESCE 0xc5UL
+ #define HWRM_FW_STATE_BACKUP 0xc6UL
+ #define HWRM_FW_STATE_RESTORE 0xc7UL
+ #define HWRM_FW_SET_TIME 0xc8UL
+ #define HWRM_FW_GET_TIME 0xc9UL
+ #define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL
+ #define HWRM_FW_GET_STRUCTURED_DATA 0xcbUL
+ #define HWRM_FW_IPC_MAILBOX 0xccUL
+ #define HWRM_FW_ECN_CFG 0xcdUL
+ #define HWRM_FW_ECN_QCFG 0xceUL
+ #define HWRM_FW_SECURE_CFG 0xcfUL
+ #define HWRM_EXEC_FWD_RESP 0xd0UL
+ #define HWRM_REJECT_FWD_RESP 0xd1UL
+ #define HWRM_FWD_RESP 0xd2UL
+ #define HWRM_FWD_ASYNC_EVENT_CMPL 0xd3UL
+ #define HWRM_OEM_CMD 0xd4UL
+ #define HWRM_PORT_PRBS_TEST 0xd5UL
+ #define HWRM_PORT_SFP_SIDEBAND_CFG 0xd6UL
+ #define HWRM_PORT_SFP_SIDEBAND_QCFG 0xd7UL
+ #define HWRM_FW_STATE_UNQUIESCE 0xd8UL
+ #define HWRM_PORT_DSC_DUMP 0xd9UL
+ #define HWRM_PORT_EP_TX_QCFG 0xdaUL
+ #define HWRM_PORT_EP_TX_CFG 0xdbUL
+ #define HWRM_PORT_CFG 0xdcUL
+ #define HWRM_PORT_QCFG 0xddUL
+ #define HWRM_PORT_MAC_QCAPS 0xdfUL
+ #define HWRM_TEMP_MONITOR_QUERY 0xe0UL
+ #define HWRM_REG_POWER_QUERY 0xe1UL
+ #define HWRM_CORE_FREQUENCY_QUERY 0xe2UL
+ #define HWRM_REG_POWER_HISTOGRAM 0xe3UL
+ #define HWRM_WOL_FILTER_ALLOC 0xf0UL
+ #define HWRM_WOL_FILTER_FREE 0xf1UL
+ #define HWRM_WOL_FILTER_QCFG 0xf2UL
+ #define HWRM_WOL_REASON_QCFG 0xf3UL
+ #define HWRM_CFA_METER_QCAPS 0xf4UL
+ #define HWRM_CFA_METER_PROFILE_ALLOC 0xf5UL
+ #define HWRM_CFA_METER_PROFILE_FREE 0xf6UL
+ #define HWRM_CFA_METER_PROFILE_CFG 0xf7UL
+ #define HWRM_CFA_METER_INSTANCE_ALLOC 0xf8UL
+ #define HWRM_CFA_METER_INSTANCE_FREE 0xf9UL
+ #define HWRM_CFA_METER_INSTANCE_CFG 0xfaUL
+ #define HWRM_CFA_VFR_ALLOC 0xfdUL
+ #define HWRM_CFA_VFR_FREE 0xfeUL
+ #define HWRM_CFA_VF_PAIR_ALLOC 0x100UL
+ #define HWRM_CFA_VF_PAIR_FREE 0x101UL
+ #define HWRM_CFA_VF_PAIR_INFO 0x102UL
+ #define HWRM_CFA_FLOW_ALLOC 0x103UL
+ #define HWRM_CFA_FLOW_FREE 0x104UL
+ #define HWRM_CFA_FLOW_FLUSH 0x105UL
+ #define HWRM_CFA_FLOW_STATS 0x106UL
+ #define HWRM_CFA_FLOW_INFO 0x107UL
+ #define HWRM_CFA_DECAP_FILTER_ALLOC 0x108UL
+ #define HWRM_CFA_DECAP_FILTER_FREE 0x109UL
+ #define HWRM_CFA_VLAN_ANTISPOOF_QCFG 0x10aUL
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC 0x10bUL
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE 0x10cUL
+ #define HWRM_CFA_PAIR_ALLOC 0x10dUL
+ #define HWRM_CFA_PAIR_FREE 0x10eUL
+ #define HWRM_CFA_PAIR_INFO 0x10fUL
+ #define HWRM_FW_IPC_MSG 0x110UL
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO 0x111UL
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE 0x112UL
+ #define HWRM_CFA_FLOW_AGING_TIMER_RESET 0x113UL
+ #define HWRM_CFA_FLOW_AGING_CFG 0x114UL
+ #define HWRM_CFA_FLOW_AGING_QCFG 0x115UL
+ #define HWRM_CFA_FLOW_AGING_QCAPS 0x116UL
+ #define HWRM_CFA_CTX_MEM_RGTR 0x117UL
+ #define HWRM_CFA_CTX_MEM_UNRGTR 0x118UL
+ #define HWRM_CFA_CTX_MEM_QCTX 0x119UL
+ #define HWRM_CFA_CTX_MEM_QCAPS 0x11aUL
+ #define HWRM_CFA_COUNTER_QCAPS 0x11bUL
+ #define HWRM_CFA_COUNTER_CFG 0x11cUL
+ #define HWRM_CFA_COUNTER_QCFG 0x11dUL
+ #define HWRM_CFA_COUNTER_QSTATS 0x11eUL
+ #define HWRM_CFA_TCP_FLAG_PROCESS_QCFG 0x11fUL
+ #define HWRM_CFA_EEM_QCAPS 0x120UL
+ #define HWRM_CFA_EEM_CFG 0x121UL
+ #define HWRM_CFA_EEM_QCFG 0x122UL
+ #define HWRM_CFA_EEM_OP 0x123UL
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL
+ #define HWRM_CFA_TFLIB 0x125UL
+ #define HWRM_CFA_LAG_GROUP_MEMBER_RGTR 0x126UL
+ #define HWRM_CFA_LAG_GROUP_MEMBER_UNRGTR 0x127UL
+ #define HWRM_CFA_TLS_FILTER_ALLOC 0x128UL
+ #define HWRM_CFA_TLS_FILTER_FREE 0x129UL
+ #define HWRM_CFA_RELEASE_AFM_FUNC 0x12aUL
+ #define HWRM_ENGINE_CKV_STATUS 0x12eUL
+ #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
+ #define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL
+ #define HWRM_ENGINE_CKV_KEY_ADD 0x131UL
+ #define HWRM_ENGINE_CKV_KEY_DELETE 0x132UL
+ #define HWRM_ENGINE_CKV_FLUSH 0x133UL
+ #define HWRM_ENGINE_CKV_RNG_GET 0x134UL
+ #define HWRM_ENGINE_CKV_KEY_GEN 0x135UL
+ #define HWRM_ENGINE_CKV_KEY_LABEL_CFG 0x136UL
+ #define HWRM_ENGINE_CKV_KEY_LABEL_QCFG 0x137UL
+ #define HWRM_ENGINE_QG_CONFIG_QUERY 0x13cUL
+ #define HWRM_ENGINE_QG_QUERY 0x13dUL
+ #define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY 0x13eUL
+ #define HWRM_ENGINE_QG_METER_PROFILE_QUERY 0x13fUL
+ #define HWRM_ENGINE_QG_METER_PROFILE_ALLOC 0x140UL
+ #define HWRM_ENGINE_QG_METER_PROFILE_FREE 0x141UL
+ #define HWRM_ENGINE_QG_METER_QUERY 0x142UL
+ #define HWRM_ENGINE_QG_METER_BIND 0x143UL
+ #define HWRM_ENGINE_QG_METER_UNBIND 0x144UL
+ #define HWRM_ENGINE_QG_FUNC_BIND 0x145UL
+ #define HWRM_ENGINE_SG_CONFIG_QUERY 0x146UL
+ #define HWRM_ENGINE_SG_QUERY 0x147UL
+ #define HWRM_ENGINE_SG_METER_QUERY 0x148UL
+ #define HWRM_ENGINE_SG_METER_CONFIG 0x149UL
+ #define HWRM_ENGINE_SG_QG_BIND 0x14aUL
+ #define HWRM_ENGINE_QG_SG_UNBIND 0x14bUL
+ #define HWRM_ENGINE_CONFIG_QUERY 0x154UL
+ #define HWRM_ENGINE_STATS_CONFIG 0x155UL
+ #define HWRM_ENGINE_STATS_CLEAR 0x156UL
+ #define HWRM_ENGINE_STATS_QUERY 0x157UL
+ #define HWRM_ENGINE_STATS_QUERY_CONTINUOUS_ERROR 0x158UL
+ #define HWRM_ENGINE_RQ_ALLOC 0x15eUL
+ #define HWRM_ENGINE_RQ_FREE 0x15fUL
+ #define HWRM_ENGINE_CQ_ALLOC 0x160UL
+ #define HWRM_ENGINE_CQ_FREE 0x161UL
+ #define HWRM_ENGINE_NQ_ALLOC 0x162UL
+ #define HWRM_ENGINE_NQ_FREE 0x163UL
+ #define HWRM_ENGINE_ON_DIE_RQE_CREDITS 0x164UL
+ #define HWRM_ENGINE_FUNC_QCFG 0x165UL
+ #define HWRM_FUNC_RESOURCE_QCAPS 0x190UL
+ #define HWRM_FUNC_VF_RESOURCE_CFG 0x191UL
+ #define HWRM_FUNC_BACKING_STORE_QCAPS 0x192UL
+ #define HWRM_FUNC_BACKING_STORE_CFG 0x193UL
+ #define HWRM_FUNC_BACKING_STORE_QCFG 0x194UL
+ #define HWRM_FUNC_VF_BW_CFG 0x195UL
+ #define HWRM_FUNC_VF_BW_QCFG 0x196UL
+ #define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL
+ #define HWRM_FUNC_QSTATS_EXT 0x198UL
+ #define HWRM_STAT_EXT_CTX_QUERY 0x199UL
+ #define HWRM_FUNC_SPD_CFG 0x19aUL
+ #define HWRM_FUNC_SPD_QCFG 0x19bUL
+ #define HWRM_FUNC_PTP_PIN_QCFG 0x19cUL
+ #define HWRM_FUNC_PTP_PIN_CFG 0x19dUL
+ #define HWRM_FUNC_PTP_CFG 0x19eUL
+ #define HWRM_FUNC_PTP_TS_QUERY 0x19fUL
+ #define HWRM_FUNC_PTP_EXT_CFG 0x1a0UL
+ #define HWRM_FUNC_PTP_EXT_QCFG 0x1a1UL
+ #define HWRM_FUNC_KEY_CTX_ALLOC 0x1a2UL
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2 0x1a3UL
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2 0x1a4UL
+ #define HWRM_FUNC_DBR_PACING_CFG 0x1a5UL
+ #define HWRM_FUNC_DBR_PACING_QCFG 0x1a6UL
+ #define HWRM_FUNC_DBR_PACING_BROADCAST_EVENT 0x1a7UL
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2 0x1a8UL
+ #define HWRM_FUNC_DBR_PACING_NQLIST_QUERY 0x1a9UL
+ #define HWRM_FUNC_DBR_RECOVERY_COMPLETED 0x1aaUL
+ #define HWRM_FUNC_SYNCE_CFG 0x1abUL
+ #define HWRM_FUNC_SYNCE_QCFG 0x1acUL
+ #define HWRM_FUNC_KEY_CTX_FREE 0x1adUL
+ #define HWRM_FUNC_LAG_MODE_CFG 0x1aeUL
+ #define HWRM_FUNC_LAG_MODE_QCFG 0x1afUL
+ #define HWRM_FUNC_LAG_CREATE 0x1b0UL
+ #define HWRM_FUNC_LAG_UPDATE 0x1b1UL
+ #define HWRM_FUNC_LAG_FREE 0x1b2UL
+ #define HWRM_FUNC_LAG_QCFG 0x1b3UL
+ #define HWRM_FUNC_TIMEDTX_PACING_RATE_ADD 0x1c2UL
+ #define HWRM_FUNC_TIMEDTX_PACING_RATE_DELETE 0x1c3UL
+ #define HWRM_FUNC_TIMEDTX_PACING_RATE_QUERY 0x1c4UL
+ #define HWRM_SELFTEST_QLIST 0x200UL
+ #define HWRM_SELFTEST_EXEC 0x201UL
+ #define HWRM_SELFTEST_IRQ 0x202UL
+ #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA 0x203UL
+ #define HWRM_PCIE_QSTATS 0x204UL
+ #define HWRM_MFG_FRU_WRITE_CONTROL 0x205UL
+ #define HWRM_MFG_TIMERS_QUERY 0x206UL
+ #define HWRM_MFG_OTP_CFG 0x207UL
+ #define HWRM_MFG_OTP_QCFG 0x208UL
+ #define HWRM_MFG_HDMA_TEST 0x209UL
+ #define HWRM_MFG_FRU_EEPROM_WRITE 0x20aUL
+ #define HWRM_MFG_FRU_EEPROM_READ 0x20bUL
+ #define HWRM_MFG_SOC_IMAGE 0x20cUL
+ #define HWRM_MFG_SOC_QSTATUS 0x20dUL
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_FINALIZE 0x20eUL
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_READ 0x20fUL
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_HEALTH 0x210UL
+ #define HWRM_MFG_PRVSN_EXPORT_CSR 0x211UL
+ #define HWRM_MFG_PRVSN_IMPORT_CERT 0x212UL
+ #define HWRM_MFG_PRVSN_GET_STATE 0x213UL
+ #define HWRM_MFG_GET_NVM_MEASUREMENT 0x214UL
+ #define HWRM_MFG_PSOC_QSTATUS 0x215UL
+ #define HWRM_MFG_SELFTEST_QLIST 0x216UL
+ #define HWRM_MFG_SELFTEST_EXEC 0x217UL
+ #define HWRM_STAT_GENERIC_QSTATS 0x218UL
+ #define HWRM_MFG_PRVSN_EXPORT_CERT 0x219UL
+ #define HWRM_STAT_DB_ERROR_QSTATS 0x21aUL
+ #define HWRM_MFG_TESTS 0x21bUL
+ #define HWRM_MFG_WRITE_CERT_NVM 0x21cUL
+ #define HWRM_PORT_POE_CFG 0x230UL
+ #define HWRM_PORT_POE_QCFG 0x231UL
+ #define HWRM_UDCC_QCAPS 0x258UL
+ #define HWRM_UDCC_CFG 0x259UL
+ #define HWRM_UDCC_QCFG 0x25aUL
+ #define HWRM_UDCC_SESSION_CFG 0x25bUL
+ #define HWRM_UDCC_SESSION_QCFG 0x25cUL
+ #define HWRM_UDCC_SESSION_QUERY 0x25dUL
+ #define HWRM_UDCC_COMP_CFG 0x25eUL
+ #define HWRM_UDCC_COMP_QCFG 0x25fUL
+ #define HWRM_UDCC_COMP_QUERY 0x260UL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS 0x261UL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_CFG 0x262UL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCFG 0x263UL
+ #define HWRM_TF 0x2bcUL
+ #define HWRM_TF_VERSION_GET 0x2bdUL
+ #define HWRM_TF_SESSION_OPEN 0x2c6UL
+ #define HWRM_TF_SESSION_REGISTER 0x2c8UL
+ #define HWRM_TF_SESSION_UNREGISTER 0x2c9UL
+ #define HWRM_TF_SESSION_CLOSE 0x2caUL
+ #define HWRM_TF_SESSION_QCFG 0x2cbUL
+ #define HWRM_TF_SESSION_RESC_QCAPS 0x2ccUL
+ #define HWRM_TF_SESSION_RESC_ALLOC 0x2cdUL
+ #define HWRM_TF_SESSION_RESC_FREE 0x2ceUL
+ #define HWRM_TF_SESSION_RESC_FLUSH 0x2cfUL
+ #define HWRM_TF_SESSION_RESC_INFO 0x2d0UL
+ #define HWRM_TF_SESSION_HOTUP_STATE_SET 0x2d1UL
+ #define HWRM_TF_SESSION_HOTUP_STATE_GET 0x2d2UL
+ #define HWRM_TF_TBL_TYPE_GET 0x2daUL
+ #define HWRM_TF_TBL_TYPE_SET 0x2dbUL
+ #define HWRM_TF_TBL_TYPE_BULK_GET 0x2dcUL
+ #define HWRM_TF_EM_INSERT 0x2eaUL
+ #define HWRM_TF_EM_DELETE 0x2ebUL
+ #define HWRM_TF_EM_HASH_INSERT 0x2ecUL
+ #define HWRM_TF_EM_MOVE 0x2edUL
+ #define HWRM_TF_TCAM_SET 0x2f8UL
+ #define HWRM_TF_TCAM_GET 0x2f9UL
+ #define HWRM_TF_TCAM_MOVE 0x2faUL
+ #define HWRM_TF_TCAM_FREE 0x2fbUL
+ #define HWRM_TF_GLOBAL_CFG_SET 0x2fcUL
+ #define HWRM_TF_GLOBAL_CFG_GET 0x2fdUL
+ #define HWRM_TF_IF_TBL_SET 0x2feUL
+ #define HWRM_TF_IF_TBL_GET 0x2ffUL
+ #define HWRM_TF_RESC_USAGE_SET 0x300UL
+ #define HWRM_TF_RESC_USAGE_QUERY 0x301UL
+ #define HWRM_TF_TBL_TYPE_ALLOC 0x302UL
+ #define HWRM_TF_TBL_TYPE_FREE 0x303UL
+ #define HWRM_TFC_TBL_SCOPE_QCAPS 0x380UL
+ #define HWRM_TFC_TBL_SCOPE_ID_ALLOC 0x381UL
+ #define HWRM_TFC_TBL_SCOPE_CONFIG 0x382UL
+ #define HWRM_TFC_TBL_SCOPE_DECONFIG 0x383UL
+ #define HWRM_TFC_TBL_SCOPE_FID_ADD 0x384UL
+ #define HWRM_TFC_TBL_SCOPE_FID_REM 0x385UL
+ #define HWRM_TFC_TBL_SCOPE_POOL_ALLOC 0x386UL
+ #define HWRM_TFC_TBL_SCOPE_POOL_FREE 0x387UL
+ #define HWRM_TFC_SESSION_ID_ALLOC 0x388UL
+ #define HWRM_TFC_SESSION_FID_ADD 0x389UL
+ #define HWRM_TFC_SESSION_FID_REM 0x38aUL
+ #define HWRM_TFC_IDENT_ALLOC 0x38bUL
+ #define HWRM_TFC_IDENT_FREE 0x38cUL
+ #define HWRM_TFC_IDX_TBL_ALLOC 0x38dUL
+ #define HWRM_TFC_IDX_TBL_ALLOC_SET 0x38eUL
+ #define HWRM_TFC_IDX_TBL_SET 0x38fUL
+ #define HWRM_TFC_IDX_TBL_GET 0x390UL
+ #define HWRM_TFC_IDX_TBL_FREE 0x391UL
+ #define HWRM_TFC_GLOBAL_ID_ALLOC 0x392UL
+ #define HWRM_TFC_TCAM_SET 0x393UL
+ #define HWRM_TFC_TCAM_GET 0x394UL
+ #define HWRM_TFC_TCAM_ALLOC 0x395UL
+ #define HWRM_TFC_TCAM_ALLOC_SET 0x396UL
+ #define HWRM_TFC_TCAM_FREE 0x397UL
+ #define HWRM_TFC_IF_TBL_SET 0x398UL
+ #define HWRM_TFC_IF_TBL_GET 0x399UL
+ #define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL
+ #define HWRM_TFC_RESC_USAGE_QUERY 0x39bUL
+ #define HWRM_TFC_GLOBAL_ID_FREE 0x39cUL
+ #define HWRM_TFC_TCAM_PRI_UPDATE 0x39dUL
+ #define HWRM_TFC_HOT_UPGRADE_PROCESS 0x3a0UL
+ #define HWRM_SV 0x400UL
+ #define HWRM_DBG_SERDES_TEST 0xff0eUL
+ #define HWRM_DBG_LOG_BUFFER_FLUSH 0xff0fUL
+ #define HWRM_DBG_READ_DIRECT 0xff10UL
+ #define HWRM_DBG_READ_INDIRECT 0xff11UL
+ #define HWRM_DBG_WRITE_DIRECT 0xff12UL
+ #define HWRM_DBG_WRITE_INDIRECT 0xff13UL
+ #define HWRM_DBG_DUMP 0xff14UL
+ #define HWRM_DBG_ERASE_NVM 0xff15UL
+ #define HWRM_DBG_CFG 0xff16UL
+ #define HWRM_DBG_COREDUMP_LIST 0xff17UL
+ #define HWRM_DBG_COREDUMP_INITIATE 0xff18UL
+ #define HWRM_DBG_COREDUMP_RETRIEVE 0xff19UL
+ #define HWRM_DBG_FW_CLI 0xff1aUL
+ #define HWRM_DBG_I2C_CMD 0xff1bUL
+ #define HWRM_DBG_RING_INFO_GET 0xff1cUL
+ #define HWRM_DBG_CRASHDUMP_HEADER 0xff1dUL
+ #define HWRM_DBG_CRASHDUMP_ERASE 0xff1eUL
+ #define HWRM_DBG_DRV_TRACE 0xff1fUL
+ #define HWRM_DBG_QCAPS 0xff20UL
+ #define HWRM_DBG_QCFG 0xff21UL
+ #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG 0xff22UL
+ #define HWRM_DBG_USEQ_ALLOC 0xff23UL
+ #define HWRM_DBG_USEQ_FREE 0xff24UL
+ #define HWRM_DBG_USEQ_FLUSH 0xff25UL
+ #define HWRM_DBG_USEQ_QCAPS 0xff26UL
+ #define HWRM_DBG_USEQ_CW_CFG 0xff27UL
+ #define HWRM_DBG_USEQ_SCHED_CFG 0xff28UL
+ #define HWRM_DBG_USEQ_RUN 0xff29UL
+ #define HWRM_DBG_USEQ_DELIVERY_REQ 0xff2aUL
+ #define HWRM_DBG_USEQ_RESP_HDR 0xff2bUL
+ #define HWRM_DBG_COREDUMP_CAPTURE 0xff2cUL
+ #define HWRM_DBG_PTRACE 0xff2dUL
+ #define HWRM_DBG_SIM_CABLE_STATE 0xff2eUL
+ #define HWRM_NVM_GET_VPD_FIELD_INFO 0xffeaUL
+ #define HWRM_NVM_SET_VPD_FIELD_INFO 0xffebUL
+ #define HWRM_NVM_DEFRAG 0xffecUL
+ #define HWRM_NVM_REQ_ARBITRATION 0xffedUL
+ #define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
+ #define HWRM_NVM_VALIDATE_OPTION 0xffefUL
+ #define HWRM_NVM_FLUSH 0xfff0UL
+ #define HWRM_NVM_GET_VARIABLE 0xfff1UL
+ #define HWRM_NVM_SET_VARIABLE 0xfff2UL
+ #define HWRM_NVM_INSTALL_UPDATE 0xfff3UL
+ #define HWRM_NVM_MODIFY 0xfff4UL
+ #define HWRM_NVM_VERIFY_UPDATE 0xfff5UL
+ #define HWRM_NVM_GET_DEV_INFO 0xfff6UL
+ #define HWRM_NVM_ERASE_DIR_ENTRY 0xfff7UL
+ #define HWRM_NVM_MOD_DIR_ENTRY 0xfff8UL
+ #define HWRM_NVM_FIND_DIR_ENTRY 0xfff9UL
+ #define HWRM_NVM_GET_DIR_ENTRIES 0xfffaUL
+ #define HWRM_NVM_GET_DIR_INFO 0xfffbUL
+ #define HWRM_NVM_RAW_DUMP 0xfffcUL
+ #define HWRM_NVM_READ 0xfffdUL
+ #define HWRM_NVM_WRITE 0xfffeUL
+ #define HWRM_NVM_RAW_WRITE_BLK 0xffffUL
+ #define HWRM_LAST HWRM_NVM_RAW_WRITE_BLK
+ __le16 unused_0[3];
+};
+
+/* ret_codes (size:64b/8B) */
+struct ret_codes {
+ __le16 error_code;
+ #define HWRM_ERR_CODE_SUCCESS 0x0UL
+ #define HWRM_ERR_CODE_FAIL 0x1UL
+ #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL
+ #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL
+ #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL
+ #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL
+ #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL
+ #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
+ #define HWRM_ERR_CODE_NO_BUFFER 0x8UL
+ #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL
+ #define HWRM_ERR_CODE_HOT_RESET_PROGRESS 0xaUL
+ #define HWRM_ERR_CODE_HOT_RESET_FAIL 0xbUL
+ #define HWRM_ERR_CODE_NO_FLOW_COUNTER_DURING_ALLOC 0xcUL
+ #define HWRM_ERR_CODE_KEY_HASH_COLLISION 0xdUL
+ #define HWRM_ERR_CODE_KEY_ALREADY_EXISTS 0xeUL
+ #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
+ #define HWRM_ERR_CODE_BUSY 0x10UL
+ #define HWRM_ERR_CODE_RESOURCE_LOCKED 0x11UL
+ #define HWRM_ERR_CODE_PF_UNAVAILABLE 0x12UL
+ #define HWRM_ERR_CODE_ENTITY_NOT_PRESENT 0x13UL
+ #define HWRM_ERR_CODE_SECURE_SOC_ERROR 0x14UL
+ #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
+ #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
+ #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
+ #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED
+ __le16 unused_0[3];
+};
+
+/* hwrm_err_output (size:128b/16B) */
+struct hwrm_err_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 opaque_0;
+ __le16 opaque_1;
+ u8 cmd_err;
+ u8 valid;
+};
+#define HWRM_NA_SIGNATURE ((__le32)(-1))
+#define HWRM_MAX_REQ_LEN 128
+#define HWRM_MAX_RESP_LEN 704
+#define HW_HASH_INDEX_SIZE 0x80
+#define HW_HASH_KEY_SIZE 40
+#define HWRM_RESP_VALID_KEY 1
+#define HWRM_TARGET_ID_BONO 0xFFF8
+#define HWRM_TARGET_ID_KONG 0xFFF9
+#define HWRM_TARGET_ID_APE 0xFFFA
+#define HWRM_TARGET_ID_TOOLS 0xFFFD
+#define HWRM_VERSION_MAJOR 1
+#define HWRM_VERSION_MINOR 10
+#define HWRM_VERSION_UPDATE 3
+#define HWRM_VERSION_RSVD 97
+#define HWRM_VERSION_STR "1.10.3.97"
+
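+/*
+ * Editorial sketch (not part of the generated interface file): one way a
+ * caller might interpret the common error/response header above. It assumes
+ * the usual kernel byte-order helpers (le16_to_cpu) and types from
+ * <linux/types.h>; the helper name is illustrative only.
+ */
+static inline bool hwrm_resp_ok(const struct hwrm_err_output *resp)
+{
+	/*
+	 * Firmware writes the valid byte (HWRM_RESP_VALID_KEY) last, once the
+	 * rest of the response has landed in host memory.
+	 */
+	if (!resp->valid)
+		return false;
+	/* Anything other than SUCCESS is a firmware-reported error. */
+	return le16_to_cpu(resp->error_code) == HWRM_ERR_CODE_SUCCESS;
+}
+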
+/* hwrm_ver_get_input (size:192b/24B) */
+struct hwrm_ver_get_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 hwrm_intf_maj;
+ u8 hwrm_intf_min;
+ u8 hwrm_intf_upd;
+ u8 unused_0[5];
+};
+
+/* hwrm_ver_get_output (size:1408b/176B) */
+struct hwrm_ver_get_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 hwrm_intf_maj_8b;
+ u8 hwrm_intf_min_8b;
+ u8 hwrm_intf_upd_8b;
+ u8 hwrm_intf_rsvd_8b;
+ u8 hwrm_fw_maj_8b;
+ u8 hwrm_fw_min_8b;
+ u8 hwrm_fw_bld_8b;
+ u8 hwrm_fw_rsvd_8b;
+ u8 mgmt_fw_maj_8b;
+ u8 mgmt_fw_min_8b;
+ u8 mgmt_fw_bld_8b;
+ u8 mgmt_fw_rsvd_8b;
+ u8 netctrl_fw_maj_8b;
+ u8 netctrl_fw_min_8b;
+ u8 netctrl_fw_bld_8b;
+ u8 netctrl_fw_rsvd_8b;
+ __le32 dev_caps_cfg;
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED 0x10UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED 0x20UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_L2_FILTER_TYPES_ROCE_OR_L2_SUPPORTED 0x40UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_VIRTIO_VSWITCH_OFFLOAD_SUPPORTED 0x80UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED 0x100UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_AGING_SUPPORTED 0x200UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED 0x400UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_EEM_SUPPORTED 0x800UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_BOOT_CAPABLE 0x8000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_SOC_CAPABLE 0x10000UL
+ u8 roce_fw_maj_8b;
+ u8 roce_fw_min_8b;
+ u8 roce_fw_bld_8b;
+ u8 roce_fw_rsvd_8b;
+ char hwrm_fw_name[16];
+ char mgmt_fw_name[16];
+ char netctrl_fw_name[16];
+ char active_pkg_name[16];
+ char roce_fw_name[16];
+ __le16 chip_num;
+ u8 chip_rev;
+ u8 chip_metal;
+ u8 chip_bond_id;
+ u8 chip_platform_type;
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 0x0UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA 0x1UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_LAST VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM
+ __le16 max_req_win_len;
+ __le16 max_resp_len;
+ __le16 def_req_timeout;
+ u8 flags;
+ #define VER_GET_RESP_FLAGS_DEV_NOT_RDY 0x1UL
+ #define VER_GET_RESP_FLAGS_EXT_VER_AVAIL 0x2UL
+ #define VER_GET_RESP_FLAGS_DEV_NOT_RDY_BACKING_STORE 0x4UL
+ u8 unused_0[2];
+ u8 always_1;
+ __le16 hwrm_intf_major;
+ __le16 hwrm_intf_minor;
+ __le16 hwrm_intf_build;
+ __le16 hwrm_intf_patch;
+ __le16 hwrm_fw_major;
+ __le16 hwrm_fw_minor;
+ __le16 hwrm_fw_build;
+ __le16 hwrm_fw_patch;
+ __le16 mgmt_fw_major;
+ __le16 mgmt_fw_minor;
+ __le16 mgmt_fw_build;
+ __le16 mgmt_fw_patch;
+ __le16 netctrl_fw_major;
+ __le16 netctrl_fw_minor;
+ __le16 netctrl_fw_build;
+ __le16 netctrl_fw_patch;
+ __le16 roce_fw_major;
+ __le16 roce_fw_minor;
+ __le16 roce_fw_build;
+ __le16 roce_fw_patch;
+ __le16 max_ext_req_len;
+ __le16 max_req_timeout;
+ u8 unused_1[3];
+ u8 valid;
+};
+
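+/*
+ * Editorial sketch (not part of the generated interface file): how a driver
+ * might compare the firmware's reported HWRM interface version against the
+ * version this header was generated from, and test one of the dev_caps_cfg
+ * capability flags. Helper names are illustrative only.
+ */
+static inline bool hwrm_ver_is_older(const struct hwrm_ver_get_output *resp)
+{
+	u16 maj = le16_to_cpu(resp->hwrm_intf_major);
+	u16 min = le16_to_cpu(resp->hwrm_intf_minor);
+
+	return maj < HWRM_VERSION_MAJOR ||
+	       (maj == HWRM_VERSION_MAJOR && min < HWRM_VERSION_MINOR);
+}
+
+static inline bool hwrm_short_cmd_required(const struct hwrm_ver_get_output *resp)
+{
+	return le32_to_cpu(resp->dev_caps_cfg) &
+	       VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED;
+}
+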
+/* eject_cmpl (size:128b/16B) */
+struct eject_cmpl {
+ __le16 type;
+ #define EJECT_CMPL_TYPE_MASK 0x3fUL
+ #define EJECT_CMPL_TYPE_SFT 0
+ #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL
+ #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT
+ #define EJECT_CMPL_FLAGS_MASK 0xffc0UL
+ #define EJECT_CMPL_FLAGS_SFT 6
+ #define EJECT_CMPL_FLAGS_ERROR 0x40UL
+ __le16 len;
+ __le32 opaque;
+ __le16 v;
+ #define EJECT_CMPL_V 0x1UL
+ #define EJECT_CMPL_ERRORS_MASK 0xfffeUL
+ #define EJECT_CMPL_ERRORS_SFT 1
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_MASK 0xeUL
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_SFT 1
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH (0x5UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_LAST EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH
+ __le16 reserved16;
+ __le32 unused_2;
+};
+
+/* hwrm_cmpl (size:128b/16B) */
+struct hwrm_cmpl {
+ __le16 type;
+ #define CMPL_TYPE_MASK 0x3fUL
+ #define CMPL_TYPE_SFT 0
+ #define CMPL_TYPE_HWRM_DONE 0x20UL
+ #define CMPL_TYPE_LAST CMPL_TYPE_HWRM_DONE
+ __le16 sequence_id;
+ __le32 unused_1;
+ __le32 v;
+ #define CMPL_V 0x1UL
+ __le32 unused_3;
+};
+
+/* hwrm_fwd_req_cmpl (size:128b/16B) */
+struct hwrm_fwd_req_cmpl {
+ __le16 req_len_type;
+ #define FWD_REQ_CMPL_TYPE_MASK 0x3fUL
+ #define FWD_REQ_CMPL_TYPE_SFT 0
+ #define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL
+ #define FWD_REQ_CMPL_TYPE_LAST FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ
+ #define FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
+ #define FWD_REQ_CMPL_REQ_LEN_SFT 6
+ __le16 source_id;
+ __le32 unused0;
+ __le32 req_buf_addr_v[2];
+ #define FWD_REQ_CMPL_V 0x1UL
+ #define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL
+ #define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
+};
+
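+/*
+ * Editorial sketch (not part of the generated interface file): the _MASK /
+ * _SFT pairs throughout this header follow the usual "mask then shift"
+ * convention. For example, pulling the forwarded request length out of
+ * req_len_type might look like this (helper name illustrative only):
+ */
+static inline u16 hwrm_fwd_req_len(const struct hwrm_fwd_req_cmpl *cmpl)
+{
+	return (le16_to_cpu(cmpl->req_len_type) & FWD_REQ_CMPL_REQ_LEN_MASK) >>
+	       FWD_REQ_CMPL_REQ_LEN_SFT;
+}
+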
+/* hwrm_fwd_resp_cmpl (size:128b/16B) */
+struct hwrm_fwd_resp_cmpl {
+ __le16 type;
+ #define FWD_RESP_CMPL_TYPE_MASK 0x3fUL
+ #define FWD_RESP_CMPL_TYPE_SFT 0
+ #define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL
+ #define FWD_RESP_CMPL_TYPE_LAST FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP
+ __le16 source_id;
+ __le16 resp_len;
+ __le16 unused_1;
+ __le32 resp_buf_addr_v[2];
+ #define FWD_RESP_CMPL_V 0x1UL
+ #define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL
+ #define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
+};
+
+/* hwrm_async_event_cmpl (size:128b/16B) */
+struct hwrm_async_event_cmpl {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_TYPE_LAST ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY 0x9UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG 0xaUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION 0x37UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST 0x42UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE 0x43UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP 0x44UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD 0x46UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RSS_CHANGE 0x47UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE 0x48UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR 0x49UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_CTX_ERROR 0x4aUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_UDCC_SESSION_CHANGE 0x4bUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PEER_MMAP_CHANGE 0x4dUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_REPRESENTOR_PAIR_CHANGE 0x4eUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_STAT_CHANGE 0x4fUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HOST_COREDUMP 0x50UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x51UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_V 0x1UL
+ #define ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
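+/*
+ * Editorial sketch (not part of the generated interface file): a minimal way
+ * to recognise and dispatch the generic async event completion above. The
+ * switch names only a few of the event IDs; the helper is illustrative.
+ */
+static inline void hwrm_handle_async_event(const struct hwrm_async_event_cmpl *cmpl)
+{
+	u16 type = le16_to_cpu(cmpl->type) & ASYNC_EVENT_CMPL_TYPE_MASK;
+
+	if (type != ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT)
+		return;
+
+	switch (le16_to_cpu(cmpl->event_id)) {
+	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
+		/* link went up or down; details are in event_data1 */
+		break;
+	case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY:
+		/* firmware is about to reset; see the reset_notify layout below */
+		break;
+	default:
+		break;
+	}
+}
+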
+/* hwrm_async_event_cmpl_link_status_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_link_status_change {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN 0x0UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_MASK 0xff00000UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_SFT 20
+};
+
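+/*
+ * Editorial sketch (not part of the generated interface file): extracting
+ * the link state and port ID from event_data1 of the completion above,
+ * again using the mask/shift convention. Helper names are illustrative.
+ */
+static inline bool hwrm_link_event_is_up(const struct hwrm_async_event_cmpl_link_status_change *cmpl)
+{
+	return le32_to_cpu(cmpl->event_data1) &
+	       ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE;
+}
+
+static inline u16 hwrm_link_event_port_id(const struct hwrm_async_event_cmpl_link_status_change *cmpl)
+{
+	return (le32_to_cpu(cmpl->event_data1) &
+		ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK) >>
+	       ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT;
+}
+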
+/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */
+struct hwrm_async_event_cmpl_port_conn_not_allowed {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
+};
+
+/* hwrm_async_event_cmpl_link_speed_cfg_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_link_speed_cfg_change {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
+};
+
+/* hwrm_async_event_cmpl_reset_notify (size:128b/16B) */
+struct hwrm_async_event_cmpl_reset_notify {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY 0x8UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_V 0x1UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_SFT 0
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_STOP_TX_QUEUE 0x1UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN 0x2UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK 0xff00UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_SFT 8
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MANAGEMENT_RESET_REQUEST (0x1UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL (0x2UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL (0x3UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET (0x4UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION (0x5UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK 0xffff0000UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT 16
+};
+
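+/*
+ * Editorial sketch (not part of the generated interface file): a driver
+ * receiving the reset notification above would typically look at the
+ * requested driver action, the reason code and the grace period before the
+ * reset. The delay is reported in 100 ms ticks; names are illustrative.
+ */
+static inline u32 hwrm_reset_notify_delay_ms(const struct hwrm_async_event_cmpl_reset_notify *cmpl)
+{
+	u32 data1 = le32_to_cpu(cmpl->event_data1);
+	u32 ticks = (data1 & ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK) >>
+		    ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT;
+
+	return ticks * 100;	/* each tick is 100 ms */
+}
+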
+/* hwrm_async_event_cmpl_error_recovery (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_recovery {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY 0x9UL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED 0x2UL
+};
+
+/* hwrm_async_event_cmpl_ring_monitor_msg (size:128b/16B) */
+struct hwrm_async_event_cmpl_ring_monitor_msg {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_RING_MONITOR_MSG 0xaUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_RING_MONITOR_MSG
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_TX 0x0UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX 0x1UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_CMPL 0x2UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_CMPL
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_V 0x1UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_vf_cfg_change {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE 0x10UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TF_OWNERSHIP_RELEASE 0x20UL
+};
+
+/* hwrm_async_event_cmpl_default_vnic_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_default_vnic_change {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_MASK 0xffc0UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_SFT 6
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION 0x35UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_MASK 0x3UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_SFT 0
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_ALLOC 0x1UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE 0x2UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_MASK 0x3fcUL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_SFT 2
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_MASK 0x3fffc00UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT 10
+};
+
+/* hwrm_async_event_cmpl_hw_flow_aged (size:128b/16B) */
+struct hwrm_async_event_cmpl_hw_flow_aged {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED 0x36UL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_MASK 0x7fffffffUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION 0x80000000UL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_RX (0x0UL << 31)
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX (0x1UL << 31)
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX
+};
+
+/* hwrm_async_event_cmpl_eem_cache_flush_req (size:128b/16B) */
+struct hwrm_async_event_cmpl_eem_cache_flush_req {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_V 0x1UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_eem_cache_flush_done (size:128b/16B) */
+struct hwrm_async_event_cmpl_eem_cache_flush_done {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_SFT 0
+};
+
+/* hwrm_async_event_cmpl_deferred_response (size:128b/16B) */
+struct hwrm_async_event_cmpl_deferred_response {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE 0x40UL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_echo_request (size:128b/16B) */
+struct hwrm_async_event_cmpl_echo_request {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST 0x42UL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_phc_update (size:128b/16B) */
+struct hwrm_async_event_cmpl_phc_update {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE 0x43UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_MASK 0xffff0000UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_SFT 16
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK 0xfUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_MASTER 0x1UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_SECONDARY 0x2UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_FAILOVER 0x3UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE 0x4UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT 4
+};
+
+/* hwrm_async_event_cmpl_pps_timestamp (size:128b/16B) */
+struct hwrm_async_event_cmpl_pps_timestamp {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_PPS_TIMESTAMP 0x44UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_PPS_TIMESTAMP
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE 0x1UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_INTERNAL 0x0UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL 0x1UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_MASK 0xeUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_SFT 1
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_SFT 4
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_MASK 0xffffffffUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_SFT 0
+};
+
+/* hwrm_async_event_cmpl_error_report (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_SFT 0
+};
+
+/* hwrm_async_event_cmpl_dbg_buf_producer (size:128b/16B) */
+struct hwrm_async_event_cmpl_dbg_buf_producer {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_DBG_BUF_PRODUCER
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK 0xffffffffUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_V 0x1UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SRT_TRACE 0x0UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SRT2_TRACE 0x1UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CRT_TRACE 0x2UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CRT2_TRACE 0x3UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_RIGP0_TRACE 0x4UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_L2_HWRM_TRACE 0x5UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_ROCE_HWRM_TRACE 0x6UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA0_TRACE 0x7UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA1_TRACE 0x8UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA2_TRACE 0x9UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_RIGP1_TRACE 0xaUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_AFM_KONG_HWRM_TRACE 0xbUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_AFM_KONG_HWRM_TRACE
+};
+
+/* hwrm_async_event_cmpl_hwrm_error (size:128b/16B) */
+struct hwrm_async_event_cmpl_hwrm_error {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
+};
+
+/* hwrm_async_event_cmpl_error_report_base (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_base {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED
+};
+
+/* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_pause_storm {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM
+};
+
+/* hwrm_async_event_cmpl_error_report_invalid_signal (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_invalid_signal {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL
+};
+
+/* hwrm_async_event_cmpl_error_report_nvm (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_nvm {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_MASK 0xffffffffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_NVM_ERROR 0x3UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_NVM_ERROR
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_MASK 0xff00UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_SFT 8
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_WRITE (0x1UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE (0x2UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE
+};
+
+/* hwrm_async_event_cmpl_error_report_doorbell_drop_threshold (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_doorbell_drop_threshold {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_MASK 0xffffff00UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_SFT 8
+};
+
+/* hwrm_async_event_cmpl_error_report_thermal (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_thermal {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK 0xff00UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT 8
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_THERMAL_EVENT 0x5UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_THERMAL_EVENT
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK 0x700UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SFT 8
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN (0x0UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL (0x1UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL (0x2UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN (0x3UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR 0x800UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_DECREASING (0x0UL << 11)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING (0x1UL << 11)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING
+};
+
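+/*
+ * Editorial sketch (not part of the generated interface file): the thermal
+ * error report above carries the measured and threshold temperatures in
+ * event_data2; one plausible way to unpack them (names illustrative):
+ */
+static inline void hwrm_thermal_event_temps(const struct hwrm_async_event_cmpl_error_report_thermal *cmpl,
+					    u8 *cur, u8 *thresh)
+{
+	u32 data2 = le32_to_cpu(cmpl->event_data2);
+
+	*cur = (data2 & ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK) >>
+	       ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_SFT;
+	*thresh = (data2 & ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>
+		  ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT;
+}
+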
+/* hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED
+};
+
+/* hwrm_func_reset_input (size:192b/24B) */
+struct hwrm_func_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL
+ __le16 vf_id;
+ u8 func_reset_level;
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL 0x0UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME 0x1UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF 0x3UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_LAST FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF
+ u8 unused_0;
+};
+
+/* hwrm_func_reset_output (size:128b/16B) */
+struct hwrm_func_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
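A minimal sketch of filling the reset request for a single VF. hwrm_send() is a placeholder for whatever transport the driver actually uses, and HWRM_FUNC_RESET refers to the request-type constant defined elsewhere in this header; the enables bit and reset level come from the definitions above.

int hwrm_send(void *req, size_t len);	/* placeholder, not a real API */

static int example_reset_vf(u16 vf_id)
{
	struct hwrm_func_reset_input req = {};

	req.req_type = cpu_to_le16(HWRM_FUNC_RESET);
	req.enables = cpu_to_le32(FUNC_RESET_REQ_ENABLES_VF_ID_VALID);
	req.vf_id = cpu_to_le16(vf_id);
	req.func_reset_level = FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF;

	return hwrm_send(&req, sizeof(req));
}
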
+/* hwrm_func_getfid_input (size:192b/24B) */
+struct hwrm_func_getfid_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL
+ __le16 pci_id;
+ u8 unused_0[2];
+};
+
+/* hwrm_func_getfid_output (size:128b/16B) */
+struct hwrm_func_getfid_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_func_vf_alloc_input (size:192b/24B) */
+struct hwrm_func_vf_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL
+ __le16 first_vf_id;
+ __le16 num_vfs;
+};
+
+/* hwrm_func_vf_alloc_output (size:128b/16B) */
+struct hwrm_func_vf_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 first_vf_id;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_func_vf_free_input (size:192b/24B) */
+struct hwrm_func_vf_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL
+ __le16 first_vf_id;
+ __le16 num_vfs;
+};
+
+/* hwrm_func_vf_free_output (size:128b/16B) */
+struct hwrm_func_vf_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_vf_cfg_input (size:576b/72B) */
+struct hwrm_func_vf_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
+ #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
+ #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
+ #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x10UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x20UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS 0x40UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS 0x80UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS 0x100UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_TX_KEY_CTXS 0x1000UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_RX_KEY_CTXS 0x2000UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_QUIC_TX_KEY_CTXS 0x4000UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_QUIC_RX_KEY_CTXS 0x8000UL
+ __le16 mtu;
+ __le16 guest_vlan;
+ __le16 async_event_cr;
+ u8 dflt_mac_addr[6];
+ __le32 flags;
+ #define FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x1UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x2UL
+ #define FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x4UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x8UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x10UL
+ #define FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x20UL
+ #define FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x40UL
+ #define FUNC_VF_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x80UL
+ #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x100UL
+ #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x200UL
+ __le16 num_rsscos_ctxs;
+ __le16 num_cmpl_rings;
+ __le16 num_tx_rings;
+ __le16 num_rx_rings;
+ __le16 num_l2_ctxs;
+ __le16 num_vnics;
+ __le16 num_stat_ctxs;
+ __le16 num_hw_ring_grps;
+ __le32 num_ktls_tx_key_ctxs;
+ __le32 num_ktls_rx_key_ctxs;
+ __le16 num_msix;
+ u8 unused[2];
+ __le32 num_quic_tx_key_ctxs;
+ __le32 num_quic_rx_key_ctxs;
+};
+
+/* hwrm_func_vf_cfg_output (size:128b/16B) */
+struct hwrm_func_vf_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
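Hedged example of the enables-bit pattern used throughout this file: a request field is only honoured when its corresponding bit in enables is set. The helper below is an assumption, not driver code; it configures a VF's MTU and default MAC through hwrm_func_vf_cfg_input.

static void example_fill_vf_cfg(struct hwrm_func_vf_cfg_input *req,
				u16 mtu, const u8 mac[ETH_ALEN])
{
	/* only fields whose enables bit is set are consumed by firmware */
	req->enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_MTU |
				   FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	req->mtu = cpu_to_le16(mtu);
	memcpy(req->dflt_mac_addr, mac, ETH_ALEN);
}
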
+/* hwrm_func_qcaps_input (size:192b/24B) */
+struct hwrm_func_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_qcaps_output (size:1152b/144B) */
+struct hwrm_func_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ __le16 port_id;
+ __le32 flags;
+ #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ADOPTED_PF_SUPPORTED 0x20000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL
+ #define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 0x80000UL
+ #define FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE 0x100000UL
+ #define FUNC_QCAPS_RESP_FLAGS_DYNAMIC_TX_RING_ALLOC 0x200000UL
+ #define FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE 0x400000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE 0x800000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED 0x1000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_COREDUMP_CMD_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_CRASHDUMP_CMD_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_PFC_WD_STATS_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED 0x80000000UL
+ u8 mac_address[6];
+ __le16 max_rsscos_ctx;
+ __le16 max_cmpl_rings;
+ __le16 max_tx_rings;
+ __le16 max_rx_rings;
+ __le16 max_l2_ctxs;
+ __le16 max_vnics;
+ __le16 first_vf_id;
+ __le16 max_vfs;
+ __le16 max_stat_ctx;
+ __le32 max_encap_records;
+ __le32 max_decap_records;
+ __le32 max_tx_em_flows;
+ __le32 max_tx_wm_flows;
+ __le32 max_rx_em_flows;
+ __le32 max_rx_wm_flows;
+ __le32 max_mcast_filters;
+ __le32 max_flow_id;
+ __le32 max_hw_ring_grps;
+ __le16 max_sp_tx_rings;
+ __le16 max_msix_vfs;
+ __le32 flags_ext;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PROXY_MODE_SUPPORT 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EVB_MODE_CFG_NOT_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_SOC_SPD_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_FAST_RESET_CAPABLE 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_METADATA_CFG_CAPABLE 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_NVM_OPTION_ACTION_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BD_METADATA_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECHO_REQUEST_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED 0x20000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED 0x40000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_VF_CFG_ASYNC_FOR_PF_SUPPORTED 0x80000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PARTITION_BW_SUPPORTED 0x100000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED 0x200000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_KTLS_SUPPORTED 0x400000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EP_RATE_CONTROL 0x800000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_MIN_BW_SUPPORTED 0x1000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_REQUIRED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DBR_PACING_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_HW_DBR_DROP_RECOV_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DISABLE_CQ_OVERFLOW_DETECTION_SUPPORTED 0x80000000UL
+ u8 max_schqs;
+ u8 mpc_chnls_cap;
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RCE 0x2UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TE_CFA 0x4UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RE_CFA 0x8UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_PRIMATE 0x10UL
+ __le16 max_key_ctxs_alloc;
+ __le32 flags_ext2;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_QUIC_SUPPORTED 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_KDNET_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_GENERIC_STATS_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SYNCE_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_HW_LAG_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ON_CHIP_CTX_SUPPORTED 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_STEERING_TAG_SUPPORTED 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ENHANCED_VF_SCALE_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_KEY_XID_PARTITION_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_CONCURRENT_KTLS_QUIC_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_CROSS_TC_CAP_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_PER_TC_CAP_SUPPORTED 0x20000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_PER_TC_RESERVATION_SUPPORTED 0x40000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DB_ERROR_STATS_SUPPORTED 0x80000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED 0x100000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDCC_SUPPORTED 0x200000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_SO_TXTIME_SUPPORTED 0x400000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED 0x800000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_INGRESS_NIC_FLOW_SUPPORTED 0x1000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_LPBK_STATS_SUPPORTED 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_EGRESS_NIC_FLOW_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_MULTI_LOSSLESS_QUEUES_SUPPORTED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_PEER_MMAP_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_PACING_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_VF_STAT_EJECTION_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_HOST_COREDUMP_SUPPORTED 0x80000000UL
+ __le16 tunnel_disable_flag;
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NVGRE 0x4UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_L2GRE 0x8UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_GRE 0x10UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_IPINIP 0x20UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_MPLS 0x40UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_PPPOE 0x80UL
+ __le16 xid_partition_cap;
+ #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_TX_CK 0x1UL
+ #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_RX_CK 0x2UL
+ u8 device_serial_number[8];
+ __le16 ctxs_per_partition;
+ __le16 max_tso_segs;
+ __le32 roce_vf_max_av;
+ __le32 roce_vf_max_cq;
+ __le32 roce_vf_max_mrw;
+ __le32 roce_vf_max_qp;
+ __le32 roce_vf_max_srq;
+ __le32 roce_vf_max_gid;
+ __le32 flags_ext3;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_RM_RSV_WHILE_ALLOC_CAP 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_REQUIRE_L2_FILTER 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MAX_ROCE_VFS_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_RX_RATE_PROFILE_SEL_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_BIDI_OPT_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED 0x20UL
+ __le16 max_roce_vfs;
+ __le16 max_crypto_rx_flow_filters;
+ u8 unused_3[3];
+ u8 valid;
+};
+
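Sketch of consuming the capability words returned by hwrm_func_qcaps: base capabilities live in flags, newer ones in flags_ext/flags_ext2/flags_ext3. The helper name and the choice of PTP bits are illustrative only.

static void example_parse_qcaps(const struct hwrm_func_qcaps_output *resp,
				bool *ptp, bool *ptp_rtc64)
{
	u32 flags = le32_to_cpu(resp->flags);
	u32 flags_ext = le32_to_cpu(resp->flags_ext);

	*ptp = !!(flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED);
	*ptp_rtc64 = !!(flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED);
}
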
+/* hwrm_func_qcfg_input (size:192b/24B) */
+struct hwrm_func_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_qcfg_output (size:1344b/168B) */
+struct hwrm_func_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ __le16 port_id;
+ __le16 vlan;
+ __le16 flags;
+ #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
+ #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
+ #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
+ #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
+ #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
+ #define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL
+ #define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL
+ #define FUNC_QCFG_RESP_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x100UL
+ #define FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED 0x200UL
+ #define FUNC_QCFG_RESP_FLAGS_PPP_PUSH_MODE_ENABLED 0x400UL
+ #define FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED 0x800UL
+ #define FUNC_QCFG_RESP_FLAGS_FAST_RESET_ALLOWED 0x1000UL
+ #define FUNC_QCFG_RESP_FLAGS_MULTI_ROOT 0x2000UL
+ #define FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV 0x4000UL
+ #define FUNC_QCFG_RESP_FLAGS_ROCE_VNIC_ID_VALID 0x8000UL
+ u8 mac_address[6];
+ __le16 pci_id;
+ __le16 alloc_rsscos_ctx;
+ __le16 alloc_cmpl_rings;
+ __le16 alloc_tx_rings;
+ __le16 alloc_rx_rings;
+ __le16 alloc_l2_ctx;
+ __le16 alloc_vnics;
+ __le16 admin_mtu;
+ __le16 mru;
+ __le16 stat_ctx_id;
+ u8 port_partition_type;
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF 0x0UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS 0x1UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2 0x5UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_LAST FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN
+ u8 port_pf_cnt;
+ #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
+ #define FUNC_QCFG_RESP_PORT_PF_CNT_LAST FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL
+ __le16 dflt_vnic_id;
+ __le16 max_mtu_configured;
+ __le32 min_bw;
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 max_bw;
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 evb_mode;
+ #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
+ #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
+ #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
+ #define FUNC_QCFG_RESP_EVB_MODE_LAST FUNC_QCFG_RESP_EVB_MODE_VEPA
+ u8 options;
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT 0
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_SFT 2
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2)
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2)
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2)
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO
+ #define FUNC_QCFG_RESP_OPTIONS_RSVD_MASK 0xf0UL
+ #define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT 4
+ __le16 alloc_vfs;
+ __le32 alloc_mcast_filters;
+ __le32 alloc_hw_ring_grps;
+ __le16 alloc_sp_tx_rings;
+ __le16 alloc_stat_ctx;
+ __le16 alloc_msix;
+ __le16 registered_vfs;
+ __le16 l2_doorbell_bar_size_kb;
+ u8 active_endpoints;
+ u8 always_1;
+ __le32 reset_addr_poll;
+ __le16 legacy_l2_db_size_kb;
+ __le16 svif_info;
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_MASK 0x7fffUL
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_SFT 0
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_VALID 0x8000UL
+ u8 mpc_chnls;
+ #define FUNC_QCFG_RESP_MPC_CHNLS_TCE_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_RCE_ENABLED 0x2UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_TE_CFA_ENABLED 0x4UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_RE_CFA_ENABLED 0x8UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_PRIMATE_ENABLED 0x10UL
+ u8 db_page_size;
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_4KB 0x0UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_8KB 0x1UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_16KB 0x2UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_32KB 0x3UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_64KB 0x4UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_128KB 0x5UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_256KB 0x6UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_512KB 0x7UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_1MB 0x8UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_2MB 0x9UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB 0xaUL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_LAST FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB
+ __le16 roce_vnic_id;
+ __le32 partition_min_bw;
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100
+ __le32 partition_max_bw;
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
+ __le16 host_mtu;
+ __le16 flags2;
+ #define FUNC_QCFG_RESP_FLAGS2_SRIOV_DSCP_INSERT_ENABLED 0x1UL
+ __le16 stag_vid;
+ u8 port_kdnet_mode;
+ #define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL
+ #define FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_PORT_KDNET_MODE_LAST FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED
+ u8 kdnet_pcie_function;
+ __le16 port_kdnet_fid;
+ u8 unused_5;
+ u8 roce_bidi_opt_mode;
+ #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DISABLED 0x1UL
+ #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DEDICATED 0x2UL
+ #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_SHARED 0x4UL
+ __le32 num_ktls_tx_key_ctxs;
+ __le32 num_ktls_rx_key_ctxs;
+ u8 lag_id;
+ u8 parif;
+ u8 fw_lag_id;
+ u8 unused_6;
+ __le32 num_quic_tx_key_ctxs;
+ __le32 num_quic_rx_key_ctxs;
+ __le32 roce_max_av_per_vf;
+ __le32 roce_max_cq_per_vf;
+ __le32 roce_max_mrw_per_vf;
+ __le32 roce_max_qp_per_vf;
+ __le32 roce_max_srq_per_vf;
+ __le32 roce_max_gid_per_vf;
+ __le16 xid_partition_cfg;
+ #define FUNC_QCFG_RESP_XID_PARTITION_CFG_TX_CK 0x1UL
+ #define FUNC_QCFG_RESP_XID_PARTITION_CFG_RX_CK 0x2UL
+ __le16 mirror_vnic_id;
+ u8 unused_7[7];
+ u8 valid;
+};
+
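The min_bw/max_bw words above share one encoding: a 28-bit value, a scale bit selecting bits versus bytes, and a 3-bit unit field in bits 29-31. A small decoding sketch; every name other than the macros is an assumption.

static void example_decode_bw(u32 bw, u32 *value, bool *scale_bytes, u32 *unit)
{
	*value = (bw & FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK) >>
		 FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT;
	/* scale bit set means the value counts bytes, clear means bits */
	*scale_bytes = (bw & FUNC_QCFG_RESP_MIN_BW_SCALE) ==
		       FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES;
	*unit = (bw & FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK) >>
		FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT;
}
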
+/* hwrm_func_cfg_input (size:1280b/160B) */
+struct hwrm_func_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 num_msix;
+ __le32 flags;
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL
+ #define FUNC_CFG_REQ_FLAGS_RSVD_MASK 0x1fcUL
+ #define FUNC_CFG_REQ_FLAGS_RSVD_SFT 2
+ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL
+ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL
+ #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL
+ #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL
+ #define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL
+ #define FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x4000UL
+ #define FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x8000UL
+ #define FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x10000UL
+ #define FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x20000UL
+ #define FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x40000UL
+ #define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x80000UL
+ #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL
+ #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL
+ #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL
+ #define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL
+ #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL
+ #define FUNC_CFG_REQ_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x2000000UL
+ #define FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS 0x4000000UL
+ #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x8000000UL
+ #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x10000000UL
+ #define FUNC_CFG_REQ_FLAGS_BD_METADATA_ENABLE 0x20000000UL
+ #define FUNC_CFG_REQ_FLAGS_BD_METADATA_DISABLE 0x40000000UL
+ __le32 enables;
+ #define FUNC_CFG_REQ_ENABLES_ADMIN_MTU 0x1UL
+ #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
+ #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
+ #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
+ #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
+ #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
+ #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
+ #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
+ #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL
+ #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL
+ #define FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT 0x800000UL
+ #define FUNC_CFG_REQ_ENABLES_SCHQ_ID 0x1000000UL
+ #define FUNC_CFG_REQ_ENABLES_MPC_CHNLS 0x2000000UL
+ #define FUNC_CFG_REQ_ENABLES_PARTITION_MIN_BW 0x4000000UL
+ #define FUNC_CFG_REQ_ENABLES_PARTITION_MAX_BW 0x8000000UL
+ #define FUNC_CFG_REQ_ENABLES_TPID 0x10000000UL
+ #define FUNC_CFG_REQ_ENABLES_HOST_MTU 0x20000000UL
+ #define FUNC_CFG_REQ_ENABLES_KTLS_TX_KEY_CTXS 0x40000000UL
+ #define FUNC_CFG_REQ_ENABLES_KTLS_RX_KEY_CTXS 0x80000000UL
+ __le16 admin_mtu;
+ __le16 mru;
+ __le16 num_rsscos_ctxs;
+ __le16 num_cmpl_rings;
+ __le16 num_tx_rings;
+ __le16 num_rx_rings;
+ __le16 num_l2_ctxs;
+ __le16 num_vnics;
+ __le16 num_stat_ctxs;
+ __le16 num_hw_ring_grps;
+ u8 dflt_mac_addr[6];
+ __le16 dflt_vlan;
+ __be32 dflt_ip_addr[4];
+ __le32 min_bw;
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST FUNC_CFG_REQ_MIN_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 max_bw;
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST FUNC_CFG_REQ_MAX_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
+ __le16 async_event_cr;
+ u8 vlan_antispoof_mode;
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK 0x0UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN 0x1UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN 0x3UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_LAST FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN
+ u8 allowed_vlan_pris;
+ u8 evb_mode;
+ #define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL
+ #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL
+ #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
+ #define FUNC_CFG_REQ_EVB_MODE_LAST FUNC_CFG_REQ_EVB_MODE_VEPA
+ u8 options;
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT 0
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_SFT 2
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2)
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2)
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2)
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO
+ #define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xf0UL
+ #define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 4
+ __le16 num_mcast_filters;
+ __le16 schq_id;
+ __le16 mpc_chnls;
+ #define FUNC_CFG_REQ_MPC_CHNLS_TCE_ENABLE 0x1UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_TCE_DISABLE 0x2UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RCE_ENABLE 0x4UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RCE_DISABLE 0x8UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_TE_CFA_ENABLE 0x10UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_TE_CFA_DISABLE 0x20UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_ENABLE 0x40UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_DISABLE 0x80UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_ENABLE 0x100UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_DISABLE 0x200UL
+ __le32 partition_min_bw;
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_LAST FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100
+ __le32 partition_max_bw;
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
+ __be16 tpid;
+ __le16 host_mtu;
+ __le32 flags2;
+ #define FUNC_CFG_REQ_FLAGS2_KTLS_KEY_CTX_ASSETS_TEST 0x1UL
+ #define FUNC_CFG_REQ_FLAGS2_QUIC_KEY_CTX_ASSETS_TEST 0x2UL
+ __le32 enables2;
+ #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL
+ #define FUNC_CFG_REQ_ENABLES2_DB_PAGE_SIZE 0x2UL
+ #define FUNC_CFG_REQ_ENABLES2_QUIC_TX_KEY_CTXS 0x4UL
+ #define FUNC_CFG_REQ_ENABLES2_QUIC_RX_KEY_CTXS 0x8UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF 0x10UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_CQ_PER_VF 0x20UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_MRW_PER_VF 0x40UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_QP_PER_VF 0x80UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_SRQ_PER_VF 0x100UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_GID_PER_VF 0x200UL
+ #define FUNC_CFG_REQ_ENABLES2_XID_PARTITION_CFG 0x400UL
+ #define FUNC_CFG_REQ_ENABLES2_PHYSICAL_SLOT_NUMBER 0x800UL
+ u8 port_kdnet_mode;
+ #define FUNC_CFG_REQ_PORT_KDNET_MODE_DISABLED 0x0UL
+ #define FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED 0x1UL
+ #define FUNC_CFG_REQ_PORT_KDNET_MODE_LAST FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED
+ u8 db_page_size;
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_4KB 0x0UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_8KB 0x1UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_16KB 0x2UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_32KB 0x3UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_64KB 0x4UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_128KB 0x5UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_256KB 0x6UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_512KB 0x7UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_1MB 0x8UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_2MB 0x9UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_4MB 0xaUL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_LAST FUNC_CFG_REQ_DB_PAGE_SIZE_4MB
+ __le16 physical_slot_number;
+ __le32 num_ktls_tx_key_ctxs;
+ __le32 num_ktls_rx_key_ctxs;
+ __le32 num_quic_tx_key_ctxs;
+ __le32 num_quic_rx_key_ctxs;
+ __le32 roce_max_av_per_vf;
+ __le32 roce_max_cq_per_vf;
+ __le32 roce_max_mrw_per_vf;
+ __le32 roce_max_qp_per_vf;
+ __le32 roce_max_srq_per_vf;
+ __le32 roce_max_gid_per_vf;
+ __le16 xid_partition_cfg;
+ #define FUNC_CFG_REQ_XID_PARTITION_CFG_TX_CK 0x1UL
+ #define FUNC_CFG_REQ_XID_PARTITION_CFG_RX_CK 0x2UL
+ __le16 unused_2;
+};
+
+/* hwrm_func_cfg_output (size:128b/16B) */
+struct hwrm_func_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_cfg_cmd_err (size:64b/8B) */
+struct hwrm_func_cfg_cmd_err {
+ u8 code;
+ #define FUNC_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_BW_RANGE 0x1UL
+ #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_MORE_THAN_MAX 0x2UL
+ #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_BW_UNSUPPORTED 0x3UL
+ #define FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_PERCENT 0x4UL
+ #define FUNC_CFG_CMD_ERR_CODE_LAST FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_PERCENT
+ u8 unused_0[7];
+};
+
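Illustrative request-building sketch for hwrm_func_cfg: set only the enables bits for the fields being changed, here the admin MTU and MRU. On failure, firmware may report one of the FUNC_CFG_CMD_ERR_CODE_* values above through hwrm_func_cfg_cmd_err. The helper is an assumption, not the driver's actual code.

static void example_fill_func_cfg_mtu(struct hwrm_func_cfg_input *req,
				      u16 fid, u16 mtu)
{
	req->fid = cpu_to_le16(fid);
	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_MTU |
				   FUNC_CFG_REQ_ENABLES_MRU);
	req->admin_mtu = cpu_to_le16(mtu);
	req->mru = cpu_to_le16(mtu);
}
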
+/* hwrm_func_qstats_input (size:192b/24B) */
+struct hwrm_func_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 flags;
+ #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL
+ #define FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x2UL
+ #define FUNC_QSTATS_REQ_FLAGS_L2_ONLY 0x4UL
+ u8 unused_0[5];
+};
+
+/* hwrm_func_qstats_output (size:1408b/176B) */
+struct hwrm_func_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 rx_agg_pkts;
+ __le64 rx_agg_bytes;
+ __le64 rx_agg_events;
+ __le64 rx_agg_aborts;
+ u8 clear_seq;
+ u8 unused_0[6];
+ u8 valid;
+};
+
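A hedged sketch of the qstats request flags: setting COUNTER_MASK is understood to ask firmware for the mask/width of each counter rather than its current value (an interpretation, not stated in this diff). Names other than the struct and macro are assumptions.

static void example_fill_qstats_req(struct hwrm_func_qstats_input *req,
				    u16 fid, bool counter_mask)
{
	req->fid = cpu_to_le16(fid);
	req->flags = counter_mask ? FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK : 0;
}
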
+/* hwrm_func_qstats_ext_input (size:256b/32B) */
+struct hwrm_func_qstats_ext_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 flags;
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x2UL
+ u8 unused_0[1];
+ __le32 enables;
+ #define FUNC_QSTATS_EXT_REQ_ENABLES_SCHQ_ID 0x1UL
+ __le16 schq_id;
+ __le16 traffic_class;
+ u8 unused_1[4];
+};
+
+/* hwrm_func_qstats_ext_output (size:1536b/192B) */
+struct hwrm_func_qstats_ext_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+ __le64 rx_tpa_events;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_clr_stats_input (size:192b/24B) */
+struct hwrm_func_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_clr_stats_output (size:128b/16B) */
+struct hwrm_func_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_vf_resc_free_input (size:192b/24B) */
+struct hwrm_func_vf_resc_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_vf_resc_free_output (size:128b/16B) */
+struct hwrm_func_vf_resc_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_drv_rgtr_input (size:896b/112B) */
+struct hwrm_func_drv_rgtr_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT 0x20UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT 0x40UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT 0x80UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_RSS_STRICT_HASH_TYPE_SUPPORT 0x100UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT 0x200UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_ASYM_QUEUE_CFG_SUPPORT 0x400UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_TF_INGRESS_NIC_FLOW_MODE 0x800UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_TF_EGRESS_NIC_FLOW_MODE 0x1000UL
+ __le32 enables;
+ #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
+ __le16 os_type;
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN 0x0UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER 0x1UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS 0xeUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS 0x12UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 0x1dUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX 0x24UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD 0x2aUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_LAST FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI
+ u8 ver_maj_8b;
+ u8 ver_min_8b;
+ u8 ver_upd_8b;
+ u8 unused_0[3];
+ __le32 timestamp;
+ u8 unused_1[4];
+ __le32 vf_req_fwd[8];
+ __le32 async_event_fwd[8];
+ __le16 ver_maj;
+ __le16 ver_min;
+ __le16 ver_upd;
+ __le16 ver_patch;
+};
+
+/* hwrm_func_drv_rgtr_output (size:128b/16B) */
+struct hwrm_func_drv_rgtr_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED 0x1UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
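Sketch of driver registration: async_event_fwd[] is treated here as a 256-bit bitmap with one bit per async event id, 32 ids per __le32 word (the usual HWRM convention, assumed rather than stated in this diff), and OS_TYPE/ASYNC_EVENT_FWD must be flagged in enables for firmware to consume those fields. The helper name is illustrative.

static void example_fill_drv_rgtr(struct hwrm_func_drv_rgtr_input *req,
				  u16 async_event_id)
{
	req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
				   FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
	req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
	/* request forwarding of one async event id (assumed bitmap layout) */
	req->async_event_fwd[async_event_id / 32] |=
		cpu_to_le32(1U << (async_event_id % 32));
}
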
+/* hwrm_func_drv_unrgtr_input (size:192b/24B) */
+struct hwrm_func_drv_unrgtr_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN 0x1UL
+ u8 unused_0[4];
+};
+
+/* hwrm_func_drv_unrgtr_output (size:128b/16B) */
+struct hwrm_func_drv_unrgtr_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_buf_rgtr_input (size:1024b/128B) */
+struct hwrm_func_buf_rgtr_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL
+ #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL
+ __le16 vf_id;
+ __le16 req_buf_num_pages;
+ __le16 req_buf_page_size;
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K 0xcUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K 0xdUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M 0x15UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M 0x16UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G 0x1eUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_LAST FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G
+ __le16 req_buf_len;
+ __le16 resp_buf_len;
+ u8 unused_0[2];
+ __le64 req_buf_page_addr0;
+ __le64 req_buf_page_addr1;
+ __le64 req_buf_page_addr2;
+ __le64 req_buf_page_addr3;
+ __le64 req_buf_page_addr4;
+ __le64 req_buf_page_addr5;
+ __le64 req_buf_page_addr6;
+ __le64 req_buf_page_addr7;
+ __le64 req_buf_page_addr8;
+ __le64 req_buf_page_addr9;
+ __le64 error_buf_addr;
+ __le64 resp_buf_addr;
+};
+
+/* hwrm_func_buf_rgtr_output (size:128b/16B) */
+struct hwrm_func_buf_rgtr_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_drv_qver_input (size:192b/24B) */
+struct hwrm_func_drv_qver_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 reserved;
+ __le16 fid;
+ u8 driver_type;
+ #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_L2 0x0UL
+ #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_ROCE 0x1UL
+ #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_LAST FUNC_DRV_QVER_REQ_DRIVER_TYPE_ROCE
+ u8 unused_0;
+};
+
+/* hwrm_func_drv_qver_output (size:256b/32B) */
+struct hwrm_func_drv_qver_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 os_type;
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN 0x0UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER 0x1UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS 0xeUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS 0x12UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS 0x1dUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX 0x24UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD 0x2aUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_LAST FUNC_DRV_QVER_RESP_OS_TYPE_UEFI
+ u8 ver_maj_8b;
+ u8 ver_min_8b;
+ u8 ver_upd_8b;
+ u8 unused_0[3];
+ __le16 ver_maj;
+ __le16 ver_min;
+ __le16 ver_upd;
+ __le16 ver_patch;
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* hwrm_func_resource_qcaps_input (size:192b/24B) */
+struct hwrm_func_resource_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_resource_qcaps_output (size:704b/88B) */
+struct hwrm_func_resource_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 max_vfs;
+ __le16 max_msix;
+ __le16 vf_reservation_strategy;
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL 0x0UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL 0x1UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC 0x2UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC
+ __le16 min_rsscos_ctx;
+ __le16 max_rsscos_ctx;
+ __le16 min_cmpl_rings;
+ __le16 max_cmpl_rings;
+ __le16 min_tx_rings;
+ __le16 max_tx_rings;
+ __le16 min_rx_rings;
+ __le16 max_rx_rings;
+ __le16 min_l2_ctxs;
+ __le16 max_l2_ctxs;
+ __le16 min_vnics;
+ __le16 max_vnics;
+ __le16 min_stat_ctx;
+ __le16 max_stat_ctx;
+ __le16 min_hw_ring_grps;
+ __le16 max_hw_ring_grps;
+ __le16 max_tx_scheduler_inputs;
+ __le16 flags;
+ #define FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED 0x1UL
+ __le16 min_msix;
+ __le32 min_ktls_tx_key_ctxs;
+ __le32 max_ktls_tx_key_ctxs;
+ __le32 min_ktls_rx_key_ctxs;
+ __le32 max_ktls_rx_key_ctxs;
+ __le32 min_quic_tx_key_ctxs;
+ __le32 max_quic_tx_key_ctxs;
+ __le32 min_quic_rx_key_ctxs;
+ __le32 max_quic_rx_key_ctxs;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_func_vf_resource_cfg_input (size:704b/88B) */
+struct hwrm_func_vf_resource_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ __le16 max_msix;
+ __le16 min_rsscos_ctx;
+ __le16 max_rsscos_ctx;
+ __le16 min_cmpl_rings;
+ __le16 max_cmpl_rings;
+ __le16 min_tx_rings;
+ __le16 max_tx_rings;
+ __le16 min_rx_rings;
+ __le16 max_rx_rings;
+ __le16 min_l2_ctxs;
+ __le16 max_l2_ctxs;
+ __le16 min_vnics;
+ __le16 max_vnics;
+ __le16 min_stat_ctx;
+ __le16 max_stat_ctx;
+ __le16 min_hw_ring_grps;
+ __le16 max_hw_ring_grps;
+ __le16 flags;
+ #define FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED 0x1UL
+ __le16 min_msix;
+ __le32 min_ktls_tx_key_ctxs;
+ __le32 max_ktls_tx_key_ctxs;
+ __le32 min_ktls_rx_key_ctxs;
+ __le32 max_ktls_rx_key_ctxs;
+ __le32 min_quic_tx_key_ctxs;
+ __le32 max_quic_tx_key_ctxs;
+ __le32 min_quic_rx_key_ctxs;
+ __le32 max_quic_rx_key_ctxs;
+};
+
+/* hwrm_func_vf_resource_cfg_output (size:384b/48B) */
+struct hwrm_func_vf_resource_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 reserved_rsscos_ctx;
+ __le16 reserved_cmpl_rings;
+ __le16 reserved_tx_rings;
+ __le16 reserved_rx_rings;
+ __le16 reserved_l2_ctxs;
+ __le16 reserved_vnics;
+ __le16 reserved_stat_ctx;
+ __le16 reserved_hw_ring_grps;
+ __le32 reserved_ktls_tx_key_ctxs;
+ __le32 reserved_ktls_rx_key_ctxs;
+ __le32 reserved_quic_tx_key_ctxs;
+ __le32 reserved_quic_rx_key_ctxs;
+ u8 unused_0[7];
+ u8 valid;
+};
+
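Sketch of a VF resource-range request: min and max are set equal to pin the allocation, and MIN_GUARANTEED asks firmware to reserve the minimum values. Function and parameter names are assumptions for illustration.

static void example_fill_vf_resources(struct hwrm_func_vf_resource_cfg_input *req,
				      u16 vf_id, u16 tx, u16 rx, u16 cmpl)
{
	req->vf_id = cpu_to_le16(vf_id);
	req->min_tx_rings = cpu_to_le16(tx);
	req->max_tx_rings = cpu_to_le16(tx);
	req->min_rx_rings = cpu_to_le16(rx);
	req->max_rx_rings = cpu_to_le16(rx);
	req->min_cmpl_rings = cpu_to_le16(cmpl);
	req->max_cmpl_rings = cpu_to_le16(cmpl);
	req->flags = cpu_to_le16(FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED);
}
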
+/* hwrm_func_backing_store_qcaps_input (size:128b/16B) */
+struct hwrm_func_backing_store_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_func_backing_store_qcaps_output (size:832b/104B) */
+struct hwrm_func_backing_store_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 qp_max_entries;
+ __le16 qp_min_qp1_entries;
+ __le16 qp_max_l2_entries;
+ __le16 qp_entry_size;
+ __le16 srq_max_l2_entries;
+ __le32 srq_max_entries;
+ __le16 srq_entry_size;
+ __le16 cq_max_l2_entries;
+ __le32 cq_max_entries;
+ __le16 cq_entry_size;
+ __le16 vnic_max_vnic_entries;
+ __le16 vnic_max_ring_table_entries;
+ __le16 vnic_entry_size;
+ __le32 stat_max_entries;
+ __le16 stat_entry_size;
+ __le16 tqm_entry_size;
+ __le32 tqm_min_entries_per_ring;
+ __le32 tqm_max_entries_per_ring;
+ __le32 mrav_max_entries;
+ __le16 mrav_entry_size;
+ __le16 tim_entry_size;
+ __le32 tim_max_entries;
+ __le16 mrav_num_entries_units;
+ u8 tqm_entries_multiple;
+ u8 ctx_kind_initializer;
+ __le16 ctx_init_mask;
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_QP 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_SRQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_CQ 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_VNIC 0x8UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_STAT 0x10UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_MRAV 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_TKC 0x40UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_RKC 0x80UL
+ u8 qp_init_offset;
+ u8 srq_init_offset;
+ u8 cq_init_offset;
+ u8 vnic_init_offset;
+ u8 tqm_fp_rings_count;
+ u8 stat_init_offset;
+ u8 mrav_init_offset;
+ u8 tqm_fp_rings_count_ext;
+ u8 tkc_init_offset;
+ u8 rkc_init_offset;
+ __le16 tkc_entry_size;
+ __le16 rkc_entry_size;
+ __le32 tkc_max_entries;
+ __le32 rkc_max_entries;
+ __le16 fast_qpmd_qp_num_entries;
+ u8 rsvd1[5];
+ u8 valid;
+};
+
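Sketch of sizing one backing-store region from the qcaps response above: the desired entry count is clamped to qp_max_entries and multiplied by the per-entry size. The extra_roce_qps parameter and the sizing policy are assumptions for illustration.

static u32 example_qp_ctx_bytes(const struct hwrm_func_backing_store_qcaps_output *resp,
				u32 extra_roce_qps)
{
	u32 entries = le16_to_cpu(resp->qp_min_qp1_entries) +
		      le16_to_cpu(resp->qp_max_l2_entries) + extra_roce_qps;
	u32 max = le32_to_cpu(resp->qp_max_entries);

	if (entries > max)
		entries = max;
	return entries * le16_to_cpu(resp->qp_entry_size);
}
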
+/* tqm_fp_ring_cfg (size:128b/16B) */
+struct tqm_fp_ring_cfg {
+ u8 tqm_ring_pg_size_tqm_ring_lvl;
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_MASK 0xfUL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_SFT 0
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_0 0x0UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_1 0x1UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2 0x2UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_SFT 4
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G
+ u8 unused[3];
+ __le32 tqm_ring_num_entries;
+ __le64 tqm_ring_page_dir;
+};
+
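Sketch of filling a per-ring TQM config: the indirection level and page size share one byte, so the two macro groups above are simply OR'ed into tqm_ring_pg_size_tqm_ring_lvl. The DMA-address parameter and the level/page-size choices are illustrative only.

static void example_fill_tqm_ring(struct tqm_fp_ring_cfg *cfg,
				  u32 num_entries, u64 page_dir_dma)
{
	cfg->tqm_ring_pg_size_tqm_ring_lvl =
		TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_1 |
		TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_4K;
	cfg->tqm_ring_num_entries = cpu_to_le32(num_entries);
	cfg->tqm_ring_page_dir = cpu_to_le64(page_dir_dma);
}
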
+/* hwrm_func_backing_store_cfg_input (size:2688b/336B) */
+struct hwrm_func_backing_store_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT 0x2UL
+ __le32 enables;
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING8 0x10000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING9 0x20000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING10 0x40000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TKC 0x80000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_RKC 0x100000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD 0x200000UL
+ u8 qpc_pg_size_qpc_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G
+ u8 srq_pg_size_srq_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G
+ u8 cq_pg_size_cq_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G
+ u8 vnic_pg_size_vnic_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G
+ u8 stat_pg_size_stat_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G
+ u8 tqm_sp_pg_size_tqm_sp_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G
+ u8 tqm_ring0_pg_size_tqm_ring0_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G
+ u8 tqm_ring1_pg_size_tqm_ring1_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G
+ u8 tqm_ring2_pg_size_tqm_ring2_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G
+ u8 tqm_ring3_pg_size_tqm_ring3_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G
+ u8 tqm_ring4_pg_size_tqm_ring4_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G
+ u8 tqm_ring5_pg_size_tqm_ring5_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G
+ u8 tqm_ring6_pg_size_tqm_ring6_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G
+ u8 tqm_ring7_pg_size_tqm_ring7_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G
+ u8 mrav_pg_size_mrav_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G
+ u8 tim_pg_size_tim_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G
+ __le64 qpc_page_dir;
+ __le64 srq_page_dir;
+ __le64 cq_page_dir;
+ __le64 vnic_page_dir;
+ __le64 stat_page_dir;
+ __le64 tqm_sp_page_dir;
+ __le64 tqm_ring0_page_dir;
+ __le64 tqm_ring1_page_dir;
+ __le64 tqm_ring2_page_dir;
+ __le64 tqm_ring3_page_dir;
+ __le64 tqm_ring4_page_dir;
+ __le64 tqm_ring5_page_dir;
+ __le64 tqm_ring6_page_dir;
+ __le64 tqm_ring7_page_dir;
+ __le64 mrav_page_dir;
+ __le64 tim_page_dir;
+ __le32 qp_num_entries;
+ __le32 srq_num_entries;
+ __le32 cq_num_entries;
+ __le32 stat_num_entries;
+ __le32 tqm_sp_num_entries;
+ __le32 tqm_ring0_num_entries;
+ __le32 tqm_ring1_num_entries;
+ __le32 tqm_ring2_num_entries;
+ __le32 tqm_ring3_num_entries;
+ __le32 tqm_ring4_num_entries;
+ __le32 tqm_ring5_num_entries;
+ __le32 tqm_ring6_num_entries;
+ __le32 tqm_ring7_num_entries;
+ __le32 mrav_num_entries;
+ __le32 tim_num_entries;
+ __le16 qp_num_qp1_entries;
+ __le16 qp_num_l2_entries;
+ __le16 qp_entry_size;
+ __le16 srq_num_l2_entries;
+ __le16 srq_entry_size;
+ __le16 cq_num_l2_entries;
+ __le16 cq_entry_size;
+ __le16 vnic_num_vnic_entries;
+ __le16 vnic_num_ring_table_entries;
+ __le16 vnic_entry_size;
+ __le16 stat_entry_size;
+ __le16 tqm_entry_size;
+ __le16 mrav_entry_size;
+ __le16 tim_entry_size;
+ u8 tqm_ring8_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G
+ u8 ring8_unused[3];
+ __le32 tqm_ring8_num_entries;
+ __le64 tqm_ring8_page_dir;
+ u8 tqm_ring9_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G
+ u8 ring9_unused[3];
+ __le32 tqm_ring9_num_entries;
+ __le64 tqm_ring9_page_dir;
+ u8 tqm_ring10_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G
+ u8 ring10_unused[3];
+ __le32 tqm_ring10_num_entries;
+ __le64 tqm_ring10_page_dir;
+ __le32 tkc_num_entries;
+ __le32 rkc_num_entries;
+ __le64 tkc_page_dir;
+ __le64 rkc_page_dir;
+ __le16 tkc_entry_size;
+ __le16 rkc_entry_size;
+ u8 tkc_pg_size_tkc_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_1G
+ u8 rkc_pg_size_rkc_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G
+ __le16 qp_num_fast_qpmd_entries;
+};
+
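
Every level/page-size byte in the request above uses the same 4-bit split: PBL level in bits 3:0, page size in bits 7:4. Below is a minimal sketch of filling one of them (the SRQ context) together with its page directory; the helper name and DMA address are hypothetical placeholders, not code from the driver.

static void fill_srq_backing_store(struct hwrm_func_backing_store_cfg_input *req,
				   u64 srq_pbl_dma_addr)
{
	/* two-level PBL of 4K pages, rooted at srq_pbl_dma_addr */
	req->srq_pg_size_srq_lvl = FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2 |
				   FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_4K;
	req->srq_page_dir = cpu_to_le64(srq_pbl_dma_addr);
}
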
+/* hwrm_func_backing_store_cfg_output (size:128b/16B) */
+struct hwrm_func_backing_store_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_error_recovery_qcfg_input (size:192b/24B) */
+struct hwrm_error_recovery_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 unused_0[8];
+};
+
+/* hwrm_error_recovery_qcfg_output (size:1664b/208B) */
+struct hwrm_error_recovery_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU 0x2UL
+ __le32 driver_polling_freq;
+ __le32 master_func_wait_period;
+ __le32 normal_func_wait_period;
+ __le32 master_func_wait_period_after_reset;
+ __le32 max_bailout_time_after_reset;
+ __le32 fw_health_status_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SFT 2
+ __le32 fw_heartbeat_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SFT 2
+ __le32 fw_reset_cnt_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SFT 2
+ __le32 reset_inprogress_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SFT 2
+ __le32 reset_inprogress_reg_mask;
+ u8 unused_0[3];
+ u8 reg_array_cnt;
+ __le32 reset_reg[16];
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SFT 2
+ __le32 reset_reg_val[16];
+ u8 delay_after_reset[16];
+ __le32 err_recovery_cnt_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SFT 2
+ u8 unused_1[3];
+ u8 valid;
+};
+
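
In each *_reg word of this response, the low two bits select the address space and the remaining, 4-byte-aligned bits carry the register offset. A small decoding sketch follows; the helper name is hypothetical, and mapping the returned offset into CPU address space is device specific and not shown.

static u32 fw_health_reg_offset(const struct hwrm_error_recovery_qcfg_output *resp,
				u32 *addr_space)
{
	u32 reg = le32_to_cpu(resp->fw_health_status_reg);

	/* PCIE_CFG/GRC/BAR0/BAR1 selector in bits 1:0, offset in bits 31:2 */
	*addr_space = reg & ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_MASK;
	return reg & ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_MASK;
}
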
+/* hwrm_func_echo_response_input (size:192b/24B) */
+struct hwrm_func_echo_response_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 event_data1;
+ __le32 event_data2;
+};
+
+/* hwrm_func_echo_response_output (size:128b/16B) */
+struct hwrm_func_echo_response_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_ptp_pin_qcfg_input (size:192b/24B) */
+struct hwrm_func_ptp_pin_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 unused_0[8];
+};
+
+/* hwrm_func_ptp_pin_qcfg_output (size:128b/16B) */
+struct hwrm_func_ptp_pin_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_pins;
+ u8 state;
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN0_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN1_ENABLED 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN2_ENABLED 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN3_ENABLED 0x8UL
+ u8 pin0_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_OUT
+ u8 pin1_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT
+ u8 pin2_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT
+ u8 pin3_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_func_ptp_pin_cfg_input (size:256b/32B) */
+struct hwrm_func_ptp_pin_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN1_STATE 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN1_USAGE 0x8UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN2_STATE 0x10UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN2_USAGE 0x20UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN3_STATE 0x40UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN3_USAGE 0x80UL
+ u8 pin0_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED
+ u8 pin0_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_OUT
+ u8 pin1_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_ENABLED
+ u8 pin1_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_OUT
+ u8 pin2_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED
+ u8 pin2_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT
+ u8 pin3_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED
+ u8 pin3_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT
+ u8 unused_0[4];
+};
+
+/* hwrm_func_ptp_pin_cfg_output (size:128b/16B) */
+struct hwrm_func_ptp_pin_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
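
A sketch of driving pin 0 as a PPS output through hwrm_func_ptp_pin_cfg: only the fields being changed are flagged in enables. The helper name is hypothetical; allocating and sending the request is driver specific and omitted.

static void cfg_pin0_pps_out(struct hwrm_func_ptp_pin_cfg_input *req)
{
	req->enables = cpu_to_le32(FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE |
				   FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE);
	req->pin0_state = FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED;
	req->pin0_usage = FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_OUT;
}
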
+/* hwrm_func_ptp_cfg_input (size:384b/48B) */
+struct hwrm_func_ptp_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 enables;
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_PPS_EVENT 0x1UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_DLL_SOURCE 0x2UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_DLL_PHASE 0x4UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PERIOD 0x8UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_UP 0x10UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PHASE 0x20UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_SET_TIME 0x40UL
+ u8 ptp_pps_event;
+ #define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_INTERNAL 0x1UL
+ #define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_EXTERNAL 0x2UL
+ u8 ptp_freq_adj_dll_source;
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_NONE 0x0UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_0 0x1UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_1 0x2UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_2 0x3UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_3 0x4UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_0 0x5UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_1 0x6UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_2 0x7UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_3 0x8UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_INVALID 0xffUL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_INVALID
+ u8 ptp_freq_adj_dll_phase;
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_NONE 0x0UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_4K 0x1UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_8K 0x2UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_10M 0x3UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_25M 0x4UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_25M
+ u8 unused_0[3];
+ __le32 ptp_freq_adj_ext_period;
+ __le32 ptp_freq_adj_ext_up;
+ __le32 ptp_freq_adj_ext_phase_lower;
+ __le32 ptp_freq_adj_ext_phase_upper;
+ __le64 ptp_set_time;
+};
+
+/* hwrm_func_ptp_cfg_output (size:128b/16B) */
+struct hwrm_func_ptp_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_ptp_ts_query_input (size:192b/24B) */
+struct hwrm_func_ptp_ts_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_PTP_TS_QUERY_REQ_FLAGS_PPS_TIME 0x1UL
+ #define FUNC_PTP_TS_QUERY_REQ_FLAGS_PTM_TIME 0x2UL
+ u8 unused_0[4];
+};
+
+/* hwrm_func_ptp_ts_query_output (size:320b/40B) */
+struct hwrm_func_ptp_ts_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 pps_event_ts;
+ __le64 ptm_local_ts;
+ __le64 ptm_system_ts;
+ __le32 ptm_link_delay;
+ u8 unused_0[3];
+ u8 valid;
+};
+
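
The ts_query request flags select which timestamps the response carries (the PPS event time or the PTM local/system pair). A reading sketch for the PTM case; the helper name is hypothetical and the message transport is omitted.

static void read_ptm_snapshot(const struct hwrm_func_ptp_ts_query_output *resp,
			      u64 *local_ts, u64 *system_ts)
{
	/* meaningful when the request carried FUNC_PTP_TS_QUERY_REQ_FLAGS_PTM_TIME */
	*local_ts  = le64_to_cpu(resp->ptm_local_ts);
	*system_ts = le64_to_cpu(resp->ptm_system_ts);
}
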
+/* hwrm_func_ptp_ext_cfg_input (size:256b/32B) */
+struct hwrm_func_ptp_ext_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 enables;
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_MASTER_FID 0x1UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_FID 0x2UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_MODE 0x4UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_FAILOVER_TIMER 0x8UL
+ __le16 phc_master_fid;
+ __le16 phc_sec_fid;
+ u8 phc_sec_mode;
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_SWITCH 0x0UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_ALL 0x1UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY 0x2UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_LAST FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY
+ u8 unused_0;
+ __le32 failover_timer;
+ u8 unused_1[4];
+};
+
+/* hwrm_func_ptp_ext_cfg_output (size:128b/16B) */
+struct hwrm_func_ptp_ext_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_ptp_ext_qcfg_input (size:192b/24B) */
+struct hwrm_func_ptp_ext_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 unused_0[8];
+};
+
+/* hwrm_func_ptp_ext_qcfg_output (size:256b/32B) */
+struct hwrm_func_ptp_ext_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 phc_master_fid;
+ __le16 phc_sec_fid;
+ __le16 phc_active_fid0;
+ __le16 phc_active_fid1;
+ __le32 last_failover_event;
+ __le16 from_fid;
+ __le16 to_fid;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_backing_store_cfg_v2_input (size:512b/64B) */
+struct hwrm_func_backing_store_cfg_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
+ __le16 instance;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_EXTEND 0x4UL
+ __le64 page_dir;
+ __le32 num_entries;
+ __le16 entry_size;
+ u8 page_size_pbl_level;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G
+ u8 subtype_valid_cnt;
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+ __le32 enables;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET 0x1UL
+ __le32 next_bs_offset;
+};
+
+/* hwrm_func_backing_store_cfg_v2_output (size:128b/16B) */
+struct hwrm_func_backing_store_cfg_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 rsvd0[7];
+ u8 valid;
+};
+
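
Unlike the legacy command, the _v2 variant configures one backing-store type per request and marks the final one with BS_CFG_ALL_DONE. A sketch for a single QP instance; the helper name and the DMA address, entry count and entry size parameters are placeholders for driver-provided values.

static void fill_qp_bs_v2(struct hwrm_func_backing_store_cfg_v2_input *req,
			  u64 page_dir_dma, u32 num_entries, u16 entry_size,
			  bool last_type)
{
	req->type        = cpu_to_le16(FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP);
	req->instance    = cpu_to_le16(0);
	req->page_dir    = cpu_to_le64(page_dir_dma);
	req->num_entries = cpu_to_le32(num_entries);
	req->entry_size  = cpu_to_le16(entry_size);
	/* single-level PBL of 4K pages, packed as level | page size */
	req->page_size_pbl_level = FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_1 |
				   FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_4K;
	if (last_type)
		req->flags = cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE);
}
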
+/* hwrm_func_backing_store_qcfg_v2_input (size:192b/24B) */
+struct hwrm_func_backing_store_qcfg_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_XID_PARTITION_TABLE 0x1dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
+ __le16 instance;
+ u8 rsvd[4];
+};
+
+/* hwrm_func_backing_store_qcfg_v2_output (size:448b/56B) */
+struct hwrm_func_backing_store_qcfg_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
+ __le16 instance;
+ __le32 flags;
+ __le64 page_dir;
+ __le32 num_entries;
+ u8 page_size_pbl_level;
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_SFT 0
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G
+ u8 subtype_valid_cnt;
+ u8 rsvd[2];
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+ u8 rsvd2[7];
+ u8 valid;
+};
+
+/* qpc_split_entries (size:128b/16B) */
+struct qpc_split_entries {
+ __le32 qp_num_l2_entries;
+ __le32 qp_num_qp1_entries;
+ __le32 qp_num_fast_qpmd_entries;
+ __le32 rsvd;
+};
+
+/* srq_split_entries (size:128b/16B) */
+struct srq_split_entries {
+ __le32 srq_num_l2_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* cq_split_entries (size:128b/16B) */
+struct cq_split_entries {
+ __le32 cq_num_l2_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* vnic_split_entries (size:128b/16B) */
+struct vnic_split_entries {
+ __le32 vnic_num_vnic_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* mrav_split_entries (size:128b/16B) */
+struct mrav_split_entries {
+ __le32 mrav_num_av_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* ts_split_entries (size:128b/16B) */
+struct ts_split_entries {
+ __le32 region_num_entries;
+ u8 tsid;
+ u8 lkup_static_bkt_cnt_exp[2];
+ u8 locked;
+ __le32 rsvd2[2];
+};
+
+/* ck_split_entries (size:128b/16B) */
+struct ck_split_entries {
+ __le32 num_quic_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
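
The per-type *_split_entries layouts above are all 16 bytes, matching the four split_entry_* words carried by the _cfg_v2/_qcfg_v2 messages. The overlay below assumes that correspondence (it is not spelled out in this header) and uses a hypothetical helper name.

static void read_qp_splits(const struct hwrm_func_backing_store_qcfg_v2_output *resp,
			   struct qpc_split_entries *qpc)
{
	/* assumption: for the QP type the split words follow qpc_split_entries */
	memcpy(qpc, &resp->split_entry_0, sizeof(*qpc));
}
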
+/* hwrm_func_backing_store_qcaps_v2_input (size:192b/24B) */
+struct hwrm_func_backing_store_qcaps_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
+ u8 rsvd[6];
+};
+
+/* hwrm_func_backing_store_qcaps_v2_output (size:448b/56B) */
+struct hwrm_func_backing_store_qcaps_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
+ __le16 entry_size;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ROCE_QP_PSEUDO_STATIC_ALLOC 0x8UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_DBG_TRACE 0x10UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_BIN_DBG_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_NEXT_BS_OFFSET 0x40UL
+ __le32 instance_bit_map;
+ u8 ctx_init_value;
+ u8 ctx_init_offset;
+ u8 entry_multiple;
+ u8 rsvd;
+ __le32 max_num_entries;
+ __le32 min_num_entries;
+ __le16 next_valid_type;
+ u8 subtype_valid_cnt;
+ u8 exact_cnt_bit_map;
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_0_EXACT 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_1_EXACT 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_2_EXACT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_3_EXACT 0x8UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_UNUSED_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_UNUSED_SFT 4
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+ __le16 max_instance_count;
+ u8 rsvd3;
+ u8 valid;
+};
+
+/* hwrm_func_dbr_pacing_qcfg_input (size:128b/16B) */
+struct hwrm_func_dbr_pacing_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_func_dbr_pacing_qcfg_output (size:512b/64B) */
+struct hwrm_func_dbr_pacing_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define FUNC_DBR_PACING_QCFG_RESP_FLAGS_DBR_NQ_EVENT_ENABLED 0x1UL
+ u8 unused_0[7];
+ __le32 dbr_stat_db_fifo_reg;
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_SFT 0
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC 0x1UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR0 0x2UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_LAST FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_MASK 0xfffffffcUL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SFT 2
+ __le32 dbr_stat_db_fifo_reg_watermark_mask;
+ u8 dbr_stat_db_fifo_reg_watermark_shift;
+ u8 unused_1[3];
+ __le32 dbr_stat_db_fifo_reg_fifo_room_mask;
+ u8 dbr_stat_db_fifo_reg_fifo_room_shift;
+ u8 unused_2[3];
+ __le32 dbr_throttling_aeq_arm_reg;
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_MASK 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_SFT 0
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_GRC 0x1UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR0 0x2UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_LAST FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_MASK 0xfffffffcUL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SFT 2
+ u8 dbr_throttling_aeq_arm_reg_val;
+ u8 unused_3[3];
+ __le32 dbr_stat_db_max_fifo_depth;
+ __le32 primary_nq_id;
+ __le32 pacing_threshold;
+ u8 unused_4[7];
+ u8 valid;
+};
+
+/* hwrm_func_drv_if_change_input (size:192b/24B) */
+struct hwrm_func_drv_if_change_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP 0x1UL
+ __le32 unused;
+};
+
+/* hwrm_func_drv_if_change_output (size:128b/16B) */
+struct hwrm_func_drv_if_change_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL
+ #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE 0x2UL
+ #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE 0x4UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_port_phy_cfg_input (size:512b/64B) */
+struct hwrm_port_phy_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
+ #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
+ #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_ENABLE 0x8000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE 0x10000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_ENABLE 0x20000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_DISABLE 0x40000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_ENABLE 0x80000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_DISABLE 0x100000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_ENABLE 0x200000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_DISABLE 0x400000UL
+ __le32 enables;
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
+ #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
+ #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
+ #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
+ #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
+ #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED 0x800UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK 0x1000UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2 0x2000UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK 0x4000UL
+ __le16 port_id;
+ __le16 force_link_speed;
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB
+ u8 auto_mode;
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_LAST PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK
+ u8 auto_duplex;
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_LAST PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH
+ u8 auto_pause;
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
+ u8 mgmt_flag;
+ #define PORT_PHY_CFG_REQ_MGMT_FLAG_LINK_RELEASE 0x1UL
+ #define PORT_PHY_CFG_REQ_MGMT_FLAG_MGMT_VALID 0x80UL
+ __le16 auto_link_speed;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB
+ __le16 auto_link_speed_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
+ u8 wirespeed;
+ #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
+ #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
+ #define PORT_PHY_CFG_REQ_WIRESPEED_LAST PORT_PHY_CFG_REQ_WIRESPEED_ON
+ u8 lpbk;
+ #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
+ #define PORT_PHY_CFG_REQ_LPBK_EXTERNAL 0x3UL
+ #define PORT_PHY_CFG_REQ_LPBK_LAST PORT_PHY_CFG_REQ_LPBK_EXTERNAL
+ u8 force_pause;
+ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
+ u8 unused_1;
+ __le32 preemphasis;
+ __le16 eee_link_speed_mask;
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le16 force_pam4_link_speed;
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB
+ __le32 tx_lpi_timer;
+ #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
+ #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
+ __le16 auto_link_pam4_speed_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_50G 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_100G 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_200G 0x4UL
+ __le16 force_link_speeds2;
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB_PAM4_56 0x1f5UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_56 0x3e9UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_56 0x7d1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_56 0xfa1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112
+ __le16 auto_link_speeds2_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_1GB 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_10GB 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_25GB 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_40GB 0x8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_50GB 0x10UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB 0x20UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_800GB_PAM4_112 0x2000UL
+ u8 unused_2[6];
+};
+
+/* hwrm_port_phy_cfg_output (size:128b/16B) */
+struct hwrm_port_phy_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_cfg_cmd_err (size:64b/8B) */
+struct hwrm_port_phy_cfg_cmd_err {
+ u8 code;
+ #define PORT_PHY_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define PORT_PHY_CFG_CMD_ERR_CODE_ILLEGAL_SPEED 0x1UL
+ #define PORT_PHY_CFG_CMD_ERR_CODE_RETRY 0x2UL
+ #define PORT_PHY_CFG_CMD_ERR_CODE_LAST PORT_PHY_CFG_CMD_ERR_CODE_RETRY
+ u8 unused_0[7];
+};
+
+/* hwrm_port_phy_qcfg_input (size:192b/24B) */
+struct hwrm_port_phy_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_phy_qcfg_output (size:832b/104B) */
+struct hwrm_port_phy_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 link;
+ #define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK
+ u8 active_fec_signal_mode;
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK 0xfUL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_SFT 0
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ 0x0UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4 0x1UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112 0x2UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK 0xf0UL
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_SFT 4
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE (0x0UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE (0x1UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE (0x2UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE (0x3UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE (0x4UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE (0x5UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE (0x6UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_LAST PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE
+ __le16 link_speed;
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_400GB 0xfa0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_800GB 0x1f40UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
+ u8 duplex_cfg;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_LAST PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL
+ u8 pause;
+ #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
+ __le16 support_speeds;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
+ __le16 force_link_speed;
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB
+ u8 auto_mode;
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK
+ u8 auto_pause;
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
+ __le16 auto_link_speed;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB
+ __le16 auto_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
+ u8 wirespeed;
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_LAST PORT_PHY_QCFG_RESP_WIRESPEED_ON
+ u8 lpbk;
+ #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
+ #define PORT_PHY_QCFG_RESP_LPBK_EXTERNAL 0x3UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LAST PORT_PHY_QCFG_RESP_LPBK_EXTERNAL
+ u8 force_pause;
+ #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
+ u8 module_status;
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX 0x1UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT 0x5UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_OVERHEATED 0x6UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE
+ __le32 preemphasis;
+ u8 phy_maj;
+ u8 phy_min;
+ u8 phy_bld;
+ u8 phy_type;
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN 0x0UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR 0x1UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 0x2UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR 0x3UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR 0x4UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 0x5UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX 0x6UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR 0x7UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L 0xbUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S 0xcUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N 0xdUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR 0xeUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4 0xfUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4 0x10UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4 0x11UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4 0x12UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10 0x13UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4 0x14UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4 0x15UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4 0x1cUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4 0x1dUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4 0x1eUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4 0x1fUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR 0x20UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR 0x21UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR 0x22UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER 0x23UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2 0x24UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2 0x25UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2 0x26UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2 0x27UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR 0x28UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR 0x29UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR 0x2aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER 0x2bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2 0x2cUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2 0x2dUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2 0x2eUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2 0x2fUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8 0x30UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8 0x31UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8 0x32UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8 0x33UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4 0x34UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4 0x35UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4 0x36UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4 0x37UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASECR8 0x38UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASESR8 0x39UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASELR8 0x3aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEER8 0x3bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEFR8 0x3cUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8 0x3dUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8
+ u8 media_type;
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_BACKPLANE 0x4UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_LAST PORT_PHY_QCFG_RESP_MEDIA_TYPE_BACKPLANE
+ u8 xcvr_pkg_type;
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL
+ u8 eee_config_phy_addr;
+ #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
+ #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL
+ u8 parallel_detect;
+ #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL
+ __le16 link_partner_adv_speeds;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL
+ u8 link_partner_adv_auto_mode;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK
+ u8 link_partner_adv_pause;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
+ __le16 adv_eee_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le16 link_partner_adv_eee_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le32 xcvr_identifier_type_tx_lpi_timer;
+ #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL
+ #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_UNKNOWN (0x0UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP (0x3UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPDD (0x18UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP112 (0x1eUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFPDD (0x1fUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_CSFP (0x20UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_CSFP
+ __le16 fec_cfg;
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_SUPPORTED 0x80UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ENABLED 0x100UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_SUPPORTED 0x200UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_ENABLED 0x400UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_SUPPORTED 0x800UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_ENABLED 0x1000UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_SUPPORTED 0x2000UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_ENABLED 0x4000UL
+ u8 duplex_state;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_LAST PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
+ u8 option_flags;
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN 0x2UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SPEEDS2_SUPPORTED 0x4UL
+ char phy_vendor_name[16];
+ char phy_vendor_partnumber[16];
+ __le16 support_pam4_speeds;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G 0x4UL
+ __le16 force_pam4_link_speed;
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB
+ __le16 auto_pam4_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_50G 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_100G 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_200G 0x4UL
+ u8 link_partner_pam4_adv_speeds;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_50GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_100GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL
+ u8 link_down_reason;
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_OTP_SPEED_VIOLATION 0x2UL
+ __le16 support_speeds2;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_1GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_10GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_25GB 0x4UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_40GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_800GB_PAM4_112 0x2000UL
+ __le16 force_link_speeds2;
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_50GB_PAM4_56 0x1f5UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB_PAM4_56 0x3e9UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_56 0x7d1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_56 0xfa1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112
+ __le16 auto_link_speeds2;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_1GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_10GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_25GB 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_40GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_50GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_800GB_PAM4_112 0x2000UL
+ u8 active_lanes;
+ u8 valid;
+};
+
+/* hwrm_port_mac_cfg_input (size:448b/56B) */
+struct hwrm_port_mac_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
+ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
+ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_ONE_STEP_TX_TS 0x2000UL
+ #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE 0x4000UL
+ #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE 0x8000UL
+ __le32 enables;
+ #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
+ #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
+ #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
+ #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
+ #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
+ #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
+ #define PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB 0x200UL
+ #define PORT_MAC_CFG_REQ_ENABLES_PTP_ADJ_PHASE 0x400UL
+ #define PORT_MAC_CFG_REQ_ENABLES_PTP_LOAD_CONTROL 0x800UL
+ __le16 port_id;
+ u8 ipg;
+ u8 lpbk;
+ #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
+ #define PORT_MAC_CFG_REQ_LPBK_LAST PORT_MAC_CFG_REQ_LPBK_REMOTE
+ u8 vlan_pri2cos_map_pri;
+ u8 reserved1;
+ u8 tunnel_pri2cos_map_pri;
+ u8 dscp2pri_map_pri;
+ __le16 rx_ts_capture_ptp_msg_type;
+ __le16 tx_ts_capture_ptp_msg_type;
+ u8 cos_field_cfg;
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
+ u8 unused_0[3];
+ __le32 ptp_freq_adj_ppb;
+ u8 unused_1[3];
+ u8 ptp_load_control;
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_NONE 0x0UL
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_IMMEDIATE 0x1UL
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_PPS_EVENT 0x2UL
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_LAST PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_PPS_EVENT
+ __le64 ptp_adj_phase;
+};
+
+/* hwrm_port_mac_cfg_output (size:128b/16B) */
+struct hwrm_port_mac_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 mru;
+ __le16 mtu;
+ u8 ipg;
+ u8 lpbk;
+ #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
+ #define PORT_MAC_CFG_RESP_LPBK_LAST PORT_MAC_CFG_RESP_LPBK_REMOTE
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */
+struct hwrm_port_mac_ptp_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_mac_ptp_qcfg_output (size:704b/88B) */
+struct hwrm_port_mac_ptp_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS 0x4UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x8UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK 0x10UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED 0x20UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME 0x40UL
+ u8 unused_0[3];
+ __le32 rx_ts_reg_off_lower;
+ __le32 rx_ts_reg_off_upper;
+ __le32 rx_ts_reg_off_seq_id;
+ __le32 rx_ts_reg_off_src_id_0;
+ __le32 rx_ts_reg_off_src_id_1;
+ __le32 rx_ts_reg_off_src_id_2;
+ __le32 rx_ts_reg_off_domain_id;
+ __le32 rx_ts_reg_off_fifo;
+ __le32 rx_ts_reg_off_fifo_adv;
+ __le32 rx_ts_reg_off_granularity;
+ __le32 tx_ts_reg_off_lower;
+ __le32 tx_ts_reg_off_upper;
+ __le32 tx_ts_reg_off_seq_id;
+ __le32 tx_ts_reg_off_fifo;
+ __le32 tx_ts_reg_off_granularity;
+ __le32 ts_ref_clock_reg_lower;
+ __le32 ts_ref_clock_reg_upper;
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* tx_port_stats (size:3264b/408B) */
+struct tx_port_stats {
+ __le64 tx_64b_frames;
+ __le64 tx_65b_127b_frames;
+ __le64 tx_128b_255b_frames;
+ __le64 tx_256b_511b_frames;
+ __le64 tx_512b_1023b_frames;
+ __le64 tx_1024b_1518b_frames;
+ __le64 tx_good_vlan_frames;
+ __le64 tx_1519b_2047b_frames;
+ __le64 tx_2048b_4095b_frames;
+ __le64 tx_4096b_9216b_frames;
+ __le64 tx_9217b_16383b_frames;
+ __le64 tx_good_frames;
+ __le64 tx_total_frames;
+ __le64 tx_ucast_frames;
+ __le64 tx_mcast_frames;
+ __le64 tx_bcast_frames;
+ __le64 tx_pause_frames;
+ __le64 tx_pfc_frames;
+ __le64 tx_jabber_frames;
+ __le64 tx_fcs_err_frames;
+ __le64 tx_control_frames;
+ __le64 tx_oversz_frames;
+ __le64 tx_single_dfrl_frames;
+ __le64 tx_multi_dfrl_frames;
+ __le64 tx_single_coll_frames;
+ __le64 tx_multi_coll_frames;
+ __le64 tx_late_coll_frames;
+ __le64 tx_excessive_coll_frames;
+ __le64 tx_frag_frames;
+ __le64 tx_err;
+ __le64 tx_tagged_frames;
+ __le64 tx_dbl_tagged_frames;
+ __le64 tx_runt_frames;
+ __le64 tx_fifo_underruns;
+ __le64 tx_pfc_ena_frames_pri0;
+ __le64 tx_pfc_ena_frames_pri1;
+ __le64 tx_pfc_ena_frames_pri2;
+ __le64 tx_pfc_ena_frames_pri3;
+ __le64 tx_pfc_ena_frames_pri4;
+ __le64 tx_pfc_ena_frames_pri5;
+ __le64 tx_pfc_ena_frames_pri6;
+ __le64 tx_pfc_ena_frames_pri7;
+ __le64 tx_eee_lpi_events;
+ __le64 tx_eee_lpi_duration;
+ __le64 tx_llfc_logical_msgs;
+ __le64 tx_hcfc_msgs;
+ __le64 tx_total_collisions;
+ __le64 tx_bytes;
+ __le64 tx_xthol_frames;
+ __le64 tx_stat_discard;
+ __le64 tx_stat_error;
+};
+
+/* rx_port_stats (size:4224b/528B) */
+struct rx_port_stats {
+ __le64 rx_64b_frames;
+ __le64 rx_65b_127b_frames;
+ __le64 rx_128b_255b_frames;
+ __le64 rx_256b_511b_frames;
+ __le64 rx_512b_1023b_frames;
+ __le64 rx_1024b_1518b_frames;
+ __le64 rx_good_vlan_frames;
+ __le64 rx_1519b_2047b_frames;
+ __le64 rx_2048b_4095b_frames;
+ __le64 rx_4096b_9216b_frames;
+ __le64 rx_9217b_16383b_frames;
+ __le64 rx_total_frames;
+ __le64 rx_ucast_frames;
+ __le64 rx_mcast_frames;
+ __le64 rx_bcast_frames;
+ __le64 rx_fcs_err_frames;
+ __le64 rx_ctrl_frames;
+ __le64 rx_pause_frames;
+ __le64 rx_pfc_frames;
+ __le64 rx_unsupported_opcode_frames;
+ __le64 rx_unsupported_da_pausepfc_frames;
+ __le64 rx_wrong_sa_frames;
+ __le64 rx_align_err_frames;
+ __le64 rx_oor_len_frames;
+ __le64 rx_code_err_frames;
+ __le64 rx_false_carrier_frames;
+ __le64 rx_ovrsz_frames;
+ __le64 rx_jbr_frames;
+ __le64 rx_mtu_err_frames;
+ __le64 rx_match_crc_frames;
+ __le64 rx_promiscuous_frames;
+ __le64 rx_tagged_frames;
+ __le64 rx_double_tagged_frames;
+ __le64 rx_trunc_frames;
+ __le64 rx_good_frames;
+ __le64 rx_pfc_xon2xoff_frames_pri0;
+ __le64 rx_pfc_xon2xoff_frames_pri1;
+ __le64 rx_pfc_xon2xoff_frames_pri2;
+ __le64 rx_pfc_xon2xoff_frames_pri3;
+ __le64 rx_pfc_xon2xoff_frames_pri4;
+ __le64 rx_pfc_xon2xoff_frames_pri5;
+ __le64 rx_pfc_xon2xoff_frames_pri6;
+ __le64 rx_pfc_xon2xoff_frames_pri7;
+ __le64 rx_pfc_ena_frames_pri0;
+ __le64 rx_pfc_ena_frames_pri1;
+ __le64 rx_pfc_ena_frames_pri2;
+ __le64 rx_pfc_ena_frames_pri3;
+ __le64 rx_pfc_ena_frames_pri4;
+ __le64 rx_pfc_ena_frames_pri5;
+ __le64 rx_pfc_ena_frames_pri6;
+ __le64 rx_pfc_ena_frames_pri7;
+ __le64 rx_sch_crc_err_frames;
+ __le64 rx_undrsz_frames;
+ __le64 rx_frag_frames;
+ __le64 rx_eee_lpi_events;
+ __le64 rx_eee_lpi_duration;
+ __le64 rx_llfc_physical_msgs;
+ __le64 rx_llfc_logical_msgs;
+ __le64 rx_llfc_msgs_with_crc_err;
+ __le64 rx_hcfc_msgs;
+ __le64 rx_hcfc_msgs_with_crc_err;
+ __le64 rx_bytes;
+ __le64 rx_runt_bytes;
+ __le64 rx_runt_frames;
+ __le64 rx_stat_discard;
+ __le64 rx_stat_err;
+};
+
+/* hwrm_port_qstats_input (size:320b/40B) */
+struct hwrm_port_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 flags;
+ #define PORT_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[5];
+ __le64 tx_stat_host_addr;
+ __le64 rx_stat_host_addr;
+};
+
+/* hwrm_port_qstats_output (size:128b/16B) */
+struct hwrm_port_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tx_stat_size;
+ __le16 rx_stat_size;
+ u8 flags;
+ #define PORT_QSTATS_RESP_FLAGS_CLEARED 0x1UL
+ u8 unused_0[2];
+ u8 valid;
+};
+
+/* tx_port_stats_ext (size:2048b/256B) */
+struct tx_port_stats_ext {
+ __le64 tx_bytes_cos0;
+ __le64 tx_bytes_cos1;
+ __le64 tx_bytes_cos2;
+ __le64 tx_bytes_cos3;
+ __le64 tx_bytes_cos4;
+ __le64 tx_bytes_cos5;
+ __le64 tx_bytes_cos6;
+ __le64 tx_bytes_cos7;
+ __le64 tx_packets_cos0;
+ __le64 tx_packets_cos1;
+ __le64 tx_packets_cos2;
+ __le64 tx_packets_cos3;
+ __le64 tx_packets_cos4;
+ __le64 tx_packets_cos5;
+ __le64 tx_packets_cos6;
+ __le64 tx_packets_cos7;
+ __le64 pfc_pri0_tx_duration_us;
+ __le64 pfc_pri0_tx_transitions;
+ __le64 pfc_pri1_tx_duration_us;
+ __le64 pfc_pri1_tx_transitions;
+ __le64 pfc_pri2_tx_duration_us;
+ __le64 pfc_pri2_tx_transitions;
+ __le64 pfc_pri3_tx_duration_us;
+ __le64 pfc_pri3_tx_transitions;
+ __le64 pfc_pri4_tx_duration_us;
+ __le64 pfc_pri4_tx_transitions;
+ __le64 pfc_pri5_tx_duration_us;
+ __le64 pfc_pri5_tx_transitions;
+ __le64 pfc_pri6_tx_duration_us;
+ __le64 pfc_pri6_tx_transitions;
+ __le64 pfc_pri7_tx_duration_us;
+ __le64 pfc_pri7_tx_transitions;
+};
+
+/* rx_port_stats_ext (size:3904b/488B) */
+struct rx_port_stats_ext {
+ __le64 link_down_events;
+ __le64 continuous_pause_events;
+ __le64 resume_pause_events;
+ __le64 continuous_roce_pause_events;
+ __le64 resume_roce_pause_events;
+ __le64 rx_bytes_cos0;
+ __le64 rx_bytes_cos1;
+ __le64 rx_bytes_cos2;
+ __le64 rx_bytes_cos3;
+ __le64 rx_bytes_cos4;
+ __le64 rx_bytes_cos5;
+ __le64 rx_bytes_cos6;
+ __le64 rx_bytes_cos7;
+ __le64 rx_packets_cos0;
+ __le64 rx_packets_cos1;
+ __le64 rx_packets_cos2;
+ __le64 rx_packets_cos3;
+ __le64 rx_packets_cos4;
+ __le64 rx_packets_cos5;
+ __le64 rx_packets_cos6;
+ __le64 rx_packets_cos7;
+ __le64 pfc_pri0_rx_duration_us;
+ __le64 pfc_pri0_rx_transitions;
+ __le64 pfc_pri1_rx_duration_us;
+ __le64 pfc_pri1_rx_transitions;
+ __le64 pfc_pri2_rx_duration_us;
+ __le64 pfc_pri2_rx_transitions;
+ __le64 pfc_pri3_rx_duration_us;
+ __le64 pfc_pri3_rx_transitions;
+ __le64 pfc_pri4_rx_duration_us;
+ __le64 pfc_pri4_rx_transitions;
+ __le64 pfc_pri5_rx_duration_us;
+ __le64 pfc_pri5_rx_transitions;
+ __le64 pfc_pri6_rx_duration_us;
+ __le64 pfc_pri6_rx_transitions;
+ __le64 pfc_pri7_rx_duration_us;
+ __le64 pfc_pri7_rx_transitions;
+ __le64 rx_bits;
+ __le64 rx_buffer_passed_threshold;
+ __le64 rx_pcs_symbol_err;
+ __le64 rx_corrected_bits;
+ __le64 rx_discard_bytes_cos0;
+ __le64 rx_discard_bytes_cos1;
+ __le64 rx_discard_bytes_cos2;
+ __le64 rx_discard_bytes_cos3;
+ __le64 rx_discard_bytes_cos4;
+ __le64 rx_discard_bytes_cos5;
+ __le64 rx_discard_bytes_cos6;
+ __le64 rx_discard_bytes_cos7;
+ __le64 rx_discard_packets_cos0;
+ __le64 rx_discard_packets_cos1;
+ __le64 rx_discard_packets_cos2;
+ __le64 rx_discard_packets_cos3;
+ __le64 rx_discard_packets_cos4;
+ __le64 rx_discard_packets_cos5;
+ __le64 rx_discard_packets_cos6;
+ __le64 rx_discard_packets_cos7;
+ __le64 rx_fec_corrected_blocks;
+ __le64 rx_fec_uncorrectable_blocks;
+ __le64 rx_filter_miss;
+ __le64 rx_fec_symbol_err;
+};
+
+/* hwrm_port_qstats_ext_input (size:320b/40B) */
+struct hwrm_port_qstats_ext_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 tx_stat_size;
+ __le16 rx_stat_size;
+ u8 flags;
+ #define PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0;
+ __le64 tx_stat_host_addr;
+ __le64 rx_stat_host_addr;
+};
+
+/* hwrm_port_qstats_ext_output (size:128b/16B) */
+struct hwrm_port_qstats_ext_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tx_stat_size;
+ __le16 rx_stat_size;
+ __le16 total_active_cos_queues;
+ u8 flags;
+ #define PORT_QSTATS_EXT_RESP_FLAGS_CLEAR_ROCE_COUNTERS_SUPPORTED 0x1UL
+ #define PORT_QSTATS_EXT_RESP_FLAGS_CLEARED 0x2UL
+ u8 valid;
+};
+
+/* hwrm_port_lpbk_qstats_input (size:256b/32B) */
+struct hwrm_port_lpbk_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 lpbk_stat_size;
+ u8 flags;
+ #define PORT_LPBK_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[5];
+ __le64 lpbk_stat_host_addr;
+};
+
+/* hwrm_port_lpbk_qstats_output (size:128b/16B) */
+struct hwrm_port_lpbk_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 lpbk_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* port_lpbk_stats (size:640b/80B) */
+struct port_lpbk_stats {
+ __le64 lpbk_ucast_frames;
+ __le64 lpbk_mcast_frames;
+ __le64 lpbk_bcast_frames;
+ __le64 lpbk_ucast_bytes;
+ __le64 lpbk_mcast_bytes;
+ __le64 lpbk_bcast_bytes;
+ __le64 lpbk_tx_discards;
+ __le64 lpbk_tx_errors;
+ __le64 lpbk_rx_discards;
+ __le64 lpbk_rx_errors;
+};
+
+/* hwrm_port_ecn_qstats_input (size:256b/32B) */
+struct hwrm_port_ecn_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 ecn_stat_buf_size;
+ u8 flags;
+ #define PORT_ECN_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[3];
+ __le64 ecn_stat_host_addr;
+};
+
+/* hwrm_port_ecn_qstats_output (size:128b/16B) */
+struct hwrm_port_ecn_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 ecn_stat_buf_size;
+ u8 mark_en;
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* port_stats_ecn (size:512b/64B) */
+struct port_stats_ecn {
+ __le64 mark_cnt_cos0;
+ __le64 mark_cnt_cos1;
+ __le64 mark_cnt_cos2;
+ __le64 mark_cnt_cos3;
+ __le64 mark_cnt_cos4;
+ __le64 mark_cnt_cos5;
+ __le64 mark_cnt_cos6;
+ __le64 mark_cnt_cos7;
+};
+
+/* hwrm_port_clr_stats_input (size:192b/24B) */
+struct hwrm_port_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 flags;
+ #define PORT_CLR_STATS_REQ_FLAGS_ROCE_COUNTERS 0x1UL
+ u8 unused_0[5];
+};
+
+/* hwrm_port_clr_stats_output (size:128b/16B) */
+struct hwrm_port_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_lpbk_clr_stats_input (size:192b/24B) */
+struct hwrm_port_lpbk_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */
+struct hwrm_port_lpbk_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_ts_query_input (size:320b/40B) */
+struct hwrm_port_ts_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH 0x1UL
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH_TX 0x0UL
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH_RX 0x1UL
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH_LAST PORT_TS_QUERY_REQ_FLAGS_PATH_RX
+ #define PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME 0x2UL
+ __le16 port_id;
+ u8 unused_0[2];
+ __le16 enables;
+ #define PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT 0x1UL
+ #define PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID 0x2UL
+ #define PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET 0x4UL
+ __le16 ts_req_timeout;
+ __le32 ptp_seq_id;
+ __le16 ptp_hdr_offset;
+ u8 unused_1[6];
+};
+
+/* hwrm_port_ts_query_output (size:192b/24B) */
+struct hwrm_port_ts_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 ptp_msg_ts;
+ __le16 ptp_msg_seqid;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_port_phy_qcaps_input (size:192b/24B) */
+struct hwrm_port_phy_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_phy_qcaps_output (size:320b/40B) */
+struct hwrm_port_phy_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET 0x10UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x20UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN 0x40UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS 0x80UL
+ u8 port_cnt;
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_12 0xcUL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST PORT_PHY_QCAPS_RESP_PORT_CNT_12
+ __le16 supported_speeds_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
+ __le16 supported_speeds_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
+ __le16 supported_speeds_eee_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL
+ __le32 tx_lpi_timer_low;
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0
+ #define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL
+ #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24
+ __le32 valid_tx_lpi_timer_high;
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
+ #define PORT_PHY_QCAPS_RESP_RSVD_MASK 0xff000000UL
+ #define PORT_PHY_QCAPS_RESP_RSVD_SFT 24
+ __le16 supported_pam4_speeds_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_50G 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_100G 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_200G 0x4UL
+ __le16 supported_pam4_speeds_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_50G 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL
+ __le16 flags2;
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_REMOTE_LPBK_UNSUPPORTED 0x10UL
+ u8 internal_port_cnt;
+ u8 unused_0;
+ __le16 supported_speeds2_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_1GB 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_10GB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_25GB 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_40GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_50GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_800GB_PAM4_112 0x2000UL
+ __le16 supported_speeds2_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_1GB 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_10GB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_25GB 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_40GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_50GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_800GB_PAM4_112 0x2000UL
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_port_phy_i2c_write_input (size:832b/104B) */
+struct hwrm_port_phy_i2c_write_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET 0x1UL
+ #define PORT_PHY_I2C_WRITE_REQ_ENABLES_BANK_NUMBER 0x2UL
+ __le16 port_id;
+ u8 i2c_slave_addr;
+ u8 bank_number;
+ __le16 page_number;
+ __le16 page_offset;
+ u8 data_length;
+ u8 unused_1[7];
+ __le32 data[16];
+};
+
+/* hwrm_port_phy_i2c_write_output (size:128b/16B) */
+struct hwrm_port_phy_i2c_write_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
+struct hwrm_port_phy_i2c_read_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL
+ #define PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER 0x2UL
+ __le16 port_id;
+ u8 i2c_slave_addr;
+ u8 bank_number;
+ __le16 page_number;
+ __le16 page_offset;
+ u8 data_length;
+ u8 unused_1[7];
+};
+
+/* hwrm_port_phy_i2c_read_output (size:640b/80B) */
+struct hwrm_port_phy_i2c_read_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 data[16];
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_mdio_write_input (size:320b/40B) */
+struct hwrm_port_phy_mdio_write_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 unused_0[2];
+ __le16 port_id;
+ u8 phy_addr;
+ u8 dev_addr;
+ __le16 reg_addr;
+ __le16 reg_data;
+ u8 cl45_mdio;
+ u8 unused_1[7];
+};
+
+/* hwrm_port_phy_mdio_write_output (size:128b/16B) */
+struct hwrm_port_phy_mdio_write_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_mdio_read_input (size:256b/32B) */
+struct hwrm_port_phy_mdio_read_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 unused_0[2];
+ __le16 port_id;
+ u8 phy_addr;
+ u8 dev_addr;
+ __le16 reg_addr;
+ u8 cl45_mdio;
+ u8 unused_1;
+};
+
+/* hwrm_port_phy_mdio_read_output (size:128b/16B) */
+struct hwrm_port_phy_mdio_read_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 reg_data;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_port_led_cfg_input (size:512b/64B) */
+struct hwrm_port_led_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL
+ __le16 port_id;
+ u8 num_leds;
+ u8 rsvd;
+ u8 led0_id;
+ u8 led0_state;
+ #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_LAST PORT_LED_CFG_REQ_LED0_STATE_BLINKALT
+ u8 led0_color;
+ #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_LAST PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER
+ u8 unused_0;
+ __le16 led0_blink_on;
+ __le16 led0_blink_off;
+ u8 led0_group_id;
+ u8 rsvd0;
+ u8 led1_id;
+ u8 led1_state;
+ #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_LAST PORT_LED_CFG_REQ_LED1_STATE_BLINKALT
+ u8 led1_color;
+ #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_LAST PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER
+ u8 unused_1;
+ __le16 led1_blink_on;
+ __le16 led1_blink_off;
+ u8 led1_group_id;
+ u8 rsvd1;
+ u8 led2_id;
+ u8 led2_state;
+ #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_LAST PORT_LED_CFG_REQ_LED2_STATE_BLINKALT
+ u8 led2_color;
+ #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_LAST PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER
+ u8 unused_2;
+ __le16 led2_blink_on;
+ __le16 led2_blink_off;
+ u8 led2_group_id;
+ u8 rsvd2;
+ u8 led3_id;
+ u8 led3_state;
+ #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_LAST PORT_LED_CFG_REQ_LED3_STATE_BLINKALT
+ u8 led3_color;
+ #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_LAST PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER
+ u8 unused_3;
+ __le16 led3_blink_on;
+ __le16 led3_blink_off;
+ u8 led3_group_id;
+ u8 rsvd3;
+};
+
+/* hwrm_port_led_cfg_output (size:128b/16B) */
+struct hwrm_port_led_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_led_qcfg_input (size:192b/24B) */
+struct hwrm_port_led_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_led_qcfg_output (size:448b/56B) */
+struct hwrm_port_led_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_leds;
+ u8 led0_id;
+ u8 led0_type;
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_LAST PORT_LED_QCFG_RESP_LED0_TYPE_INVALID
+ u8 led0_state;
+ #define PORT_LED_QCFG_RESP_LED0_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_LAST PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT
+ u8 led0_color;
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_LAST PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER
+ u8 unused_0;
+ __le16 led0_blink_on;
+ __le16 led0_blink_off;
+ u8 led0_group_id;
+ u8 led1_id;
+ u8 led1_type;
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_LAST PORT_LED_QCFG_RESP_LED1_TYPE_INVALID
+ u8 led1_state;
+ #define PORT_LED_QCFG_RESP_LED1_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_LAST PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT
+ u8 led1_color;
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_LAST PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER
+ u8 unused_1;
+ __le16 led1_blink_on;
+ __le16 led1_blink_off;
+ u8 led1_group_id;
+ u8 led2_id;
+ u8 led2_type;
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_LAST PORT_LED_QCFG_RESP_LED2_TYPE_INVALID
+ u8 led2_state;
+ #define PORT_LED_QCFG_RESP_LED2_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_LAST PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT
+ u8 led2_color;
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_LAST PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER
+ u8 unused_2;
+ __le16 led2_blink_on;
+ __le16 led2_blink_off;
+ u8 led2_group_id;
+ u8 led3_id;
+ u8 led3_type;
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_LAST PORT_LED_QCFG_RESP_LED3_TYPE_INVALID
+ u8 led3_state;
+ #define PORT_LED_QCFG_RESP_LED3_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_LAST PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT
+ u8 led3_color;
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_LAST PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER
+ u8 unused_3;
+ __le16 led3_blink_on;
+ __le16 led3_blink_off;
+ u8 led3_group_id;
+ u8 unused_4[6];
+ u8 valid;
+};
+
+/* hwrm_port_led_qcaps_input (size:192b/24B) */
+struct hwrm_port_led_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_led_qcaps_output (size:384b/48B) */
+struct hwrm_port_led_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_leds;
+ u8 unused[3];
+ u8 led0_id;
+ u8 led0_type;
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_LAST PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID
+ u8 led0_group_id;
+ u8 unused_0;
+ __le16 led0_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led0_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 led1_id;
+ u8 led1_type;
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_LAST PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID
+ u8 led1_group_id;
+ u8 unused_1;
+ __le16 led1_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led1_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 led2_id;
+ u8 led2_type;
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_LAST PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID
+ u8 led2_group_id;
+ u8 unused_2;
+ __le16 led2_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led2_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 led3_id;
+ u8 led3_type;
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_LAST PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID
+ u8 led3_group_id;
+ u8 unused_3;
+ __le16 led3_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led3_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 unused_4[3];
+ u8 valid;
+};
+
+/* hwrm_port_mac_qcaps_input (size:192b/24B) */
+struct hwrm_port_mac_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_mac_qcaps_output (size:128b/16B) */
+struct hwrm_port_mac_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define PORT_MAC_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x1UL
+ #define PORT_MAC_QCAPS_RESP_FLAGS_REMOTE_LPBK_SUPPORTED 0x2UL
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_queue_qportcfg_input (size:192b/24B) */
+struct hwrm_queue_qportcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
+ __le16 port_id;
+ u8 drv_qmap_cap;
+ #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_DISABLED 0x0UL
+ #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED 0x1UL
+ #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_LAST QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED
+ u8 unused_0;
+};
+
+/* hwrm_queue_qportcfg_output (size:1344b/168B) */
+struct hwrm_queue_qportcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 max_configurable_queues;
+ u8 max_configurable_lossless_queues;
+ u8 queue_cfg_allowed;
+ u8 queue_cfg_info;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_USE_PROFILE_TYPE 0x2UL
+ u8 queue_pfcenable_cfg_allowed;
+ u8 queue_pri2cos_cfg_allowed;
+ u8 queue_cos2bw_cfg_allowed;
+ u8 queue_id0;
+ u8 queue_id0_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id1;
+ u8 queue_id1_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id2;
+ u8 queue_id2_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id3;
+ u8 queue_id3_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id4;
+ u8 queue_id4_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id5;
+ u8 queue_id5_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id6;
+ u8 queue_id6_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id7;
+ u8 queue_id7_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id0_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ char qid0_name[16];
+ char qid1_name[16];
+ char qid2_name[16];
+ char qid3_name[16];
+ char qid4_name[16];
+ char qid5_name[16];
+ char qid6_name[16];
+ char qid7_name[16];
+ u8 queue_id1_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id2_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id3_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id4_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id5_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id6_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id7_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 valid;
+};
+
+/* hwrm_queue_qcfg_input (size:192b/24B) */
+struct hwrm_queue_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_QCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_QCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_QCFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_QCFG_REQ_FLAGS_PATH_LAST QUEUE_QCFG_REQ_FLAGS_PATH_RX
+ __le32 queue_id;
+};
+
+/* hwrm_queue_qcfg_output (size:128b/16B) */
+struct hwrm_queue_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 queue_len;
+ u8 service_profile;
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LAST QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN
+ u8 queue_cfg_info;
+ #define QUEUE_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_queue_cfg_input (size:320b/40B) */
+struct hwrm_queue_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_SFT 0
+ #define QUEUE_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
+ __le32 enables;
+ #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
+ #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
+ __le32 queue_id;
+ __le32 dflt_len;
+ u8 service_profile;
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LAST QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN
+ u8 unused_0[7];
+};
+
+/* hwrm_queue_cfg_output (size:128b/16B) */
+struct hwrm_queue_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_pfcenable_qcfg_input (size:192b/24B) */
+struct hwrm_queue_pfcenable_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_queue_pfcenable_qcfg_output (size:128b/16B) */
+struct hwrm_queue_pfcenable_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_queue_pfcenable_cfg_input (size:192b/24B) */
+struct hwrm_queue_pfcenable_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
+ __le16 port_id;
+ u8 unused_0[2];
+};
+
+/* hwrm_queue_pfcenable_cfg_output (size:128b/16B) */
+struct hwrm_queue_pfcenable_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_pri2cos_qcfg_input (size:192b/24B) */
+struct hwrm_queue_pri2cos_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN 0x2UL
+ u8 port_id;
+ u8 unused_0[3];
+};
+
+/* hwrm_queue_pri2cos_qcfg_output (size:192b/24B) */
+struct hwrm_queue_pri2cos_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 pri0_cos_queue_id;
+ u8 pri1_cos_queue_id;
+ u8 pri2_cos_queue_id;
+ u8 pri3_cos_queue_id;
+ u8 pri4_cos_queue_id;
+ u8 pri5_cos_queue_id;
+ u8 pri6_cos_queue_id;
+ u8 pri7_cos_queue_id;
+ u8 queue_cfg_info;
+ #define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_queue_pri2cos_cfg_input (size:320b/40B) */
+struct hwrm_queue_pri2cos_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT 0
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x4UL
+ __le32 enables;
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID 0x2UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID 0x4UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID 0x8UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID 0x10UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID 0x20UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID 0x40UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID 0x80UL
+ u8 port_id;
+ u8 pri0_cos_queue_id;
+ u8 pri1_cos_queue_id;
+ u8 pri2_cos_queue_id;
+ u8 pri3_cos_queue_id;
+ u8 pri4_cos_queue_id;
+ u8 pri5_cos_queue_id;
+ u8 pri6_cos_queue_id;
+ u8 pri7_cos_queue_id;
+ u8 unused_0[7];
+};
+
+/* hwrm_queue_pri2cos_cfg_output (size:128b/16B) */
+struct hwrm_queue_pri2cos_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_cos2bw_qcfg_input (size:192b/24B) */
+struct hwrm_queue_cos2bw_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_queue_cos2bw_qcfg_output (size:896b/112B) */
+struct hwrm_queue_cos2bw_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 queue_id0;
+ u8 unused_0;
+ __le16 unused_1;
+ __le32 queue_id0_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id0_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id0_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id0_pri_lvl;
+ u8 queue_id0_bw_weight;
+ struct {
+ u8 queue_id;
+ __le32 queue_id_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id_pri_lvl;
+ u8 queue_id_bw_weight;
+ } __packed cfg[7];
+ u8 unused_2[4];
+ u8 valid;
+};
+
+/* hwrm_queue_cos2bw_cfg_input (size:1024b/128B) */
+struct hwrm_queue_cos2bw_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL
+ __le16 port_id;
+ u8 queue_id0;
+ u8 unused_0;
+ __le32 queue_id0_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id0_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id0_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id0_pri_lvl;
+ u8 queue_id0_bw_weight;
+ struct {
+ u8 queue_id;
+ __le32 queue_id_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id_pri_lvl;
+ u8 queue_id_bw_weight;
+ } __packed cfg[7];
+ u8 unused_1[5];
+};
+
+/* hwrm_queue_cos2bw_cfg_output (size:128b/16B) */
+struct hwrm_queue_cos2bw_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_dscp_qcaps_input (size:192b/24B) */
+struct hwrm_queue_dscp_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 port_id;
+ u8 unused_0[7];
+};
+
+/* hwrm_queue_dscp_qcaps_output (size:128b/16B) */
+struct hwrm_queue_dscp_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_dscp_bits;
+ u8 unused_0;
+ __le16 max_entries;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_queue_dscp2pri_qcfg_input (size:256b/32B) */
+struct hwrm_queue_dscp2pri_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ u8 port_id;
+ u8 unused_0;
+ __le16 dest_data_buffer_size;
+ u8 unused_1[4];
+};
+
+/* hwrm_queue_dscp2pri_qcfg_output (size:128b/16B) */
+struct hwrm_queue_dscp2pri_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 entry_cnt;
+ u8 default_pri;
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* hwrm_queue_dscp2pri_cfg_input (size:320b/40B) */
+struct hwrm_queue_dscp2pri_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le32 flags;
+ #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL
+ __le32 enables;
+ #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL
+ u8 port_id;
+ u8 default_pri;
+ __le16 entry_cnt;
+ u8 unused_0[4];
+};
+
+/* hwrm_queue_dscp2pri_cfg_output (size:128b/16B) */
+struct hwrm_queue_dscp2pri_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_alloc_input (size:192b/24B) */
+struct hwrm_vnic_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
+ #define VNIC_ALLOC_REQ_FLAGS_VIRTIO_NET_FID_VALID 0x2UL
+ #define VNIC_ALLOC_REQ_FLAGS_VNIC_ID_VALID 0x4UL
+ __le16 virtio_net_fid;
+ __le16 vnic_id;
+};
+
+/* hwrm_vnic_alloc_output (size:128b/16B) */
+struct hwrm_vnic_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 vnic_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_vnic_update_input (size:256b/32B) */
+struct hwrm_vnic_update_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ __le32 enables;
+ #define VNIC_UPDATE_REQ_ENABLES_VNIC_STATE_VALID 0x1UL
+ #define VNIC_UPDATE_REQ_ENABLES_MRU_VALID 0x2UL
+ #define VNIC_UPDATE_REQ_ENABLES_METADATA_FORMAT_TYPE_VALID 0x4UL
+ u8 vnic_state;
+ #define VNIC_UPDATE_REQ_VNIC_STATE_NORMAL 0x0UL
+ #define VNIC_UPDATE_REQ_VNIC_STATE_DROP 0x1UL
+ #define VNIC_UPDATE_REQ_VNIC_STATE_LAST VNIC_UPDATE_REQ_VNIC_STATE_DROP
+ u8 metadata_format_type;
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_0 0x0UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_1 0x1UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_2 0x2UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_3 0x3UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4 0x4UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_LAST VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4
+ __le16 mru;
+ u8 unused_1[4];
+};
+
+/* hwrm_vnic_update_output (size:128b/16B) */
+struct hwrm_vnic_update_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_free_input (size:192b/24B) */
+struct hwrm_vnic_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_vnic_free_output (size:128b/16B) */
+struct hwrm_vnic_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_cfg_input (size:384b/48B) */
+struct hwrm_vnic_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL
+ #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL
+ #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
+ #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
+ #define VNIC_CFG_REQ_FLAGS_PORTCOS_MAPPING_MODE 0x80UL
+ __le32 enables;
+ #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
+ #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
+ #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
+ #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
+ #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
+ #define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID 0x20UL
+ #define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL
+ #define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL
+ #define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL
+ #define VNIC_CFG_REQ_ENABLES_L2_CQE_MODE 0x200UL
+ #define VNIC_CFG_REQ_ENABLES_RAW_QP_ID 0x400UL
+ __le16 vnic_id;
+ __le16 dflt_ring_grp;
+ __le16 rss_rule;
+ __le16 cos_rule;
+ __le16 lb_rule;
+ __le16 mru;
+ __le16 default_rx_ring_id;
+ __le16 default_cmpl_ring_id;
+ __le16 queue_id;
+ u8 rx_csum_v2_mode;
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_DEFAULT 0x0UL
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_ALL_OK 0x1UL
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX 0x2UL
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_LAST VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX
+ u8 l2_cqe_mode;
+ #define VNIC_CFG_REQ_L2_CQE_MODE_DEFAULT 0x0UL
+ #define VNIC_CFG_REQ_L2_CQE_MODE_COMPRESSED 0x1UL
+ #define VNIC_CFG_REQ_L2_CQE_MODE_MIXED 0x2UL
+ #define VNIC_CFG_REQ_L2_CQE_MODE_LAST VNIC_CFG_REQ_L2_CQE_MODE_MIXED
+ __le32 raw_qp_id;
+};
+
+/* hwrm_vnic_cfg_output (size:128b/16B) */
+struct hwrm_vnic_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_qcaps_input (size:192b/24B) */
+struct hwrm_vnic_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ u8 unused_0[4];
+};
+
+/* hwrm_vnic_qcaps_output (size:192b/24B) */
+struct hwrm_vnic_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 mru;
+ u8 unused_0[2];
+ __le32 flags;
+ #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL
+ #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
+ #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
+ #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
+ #define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL
+ #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL
+ #define VNIC_QCAPS_RESP_FLAGS_VNIC_STATE_CAP 0x400UL
+ #define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL
+ #define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP 0x1000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_STRICT_HASH_TYPE_CAP 0x2000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP 0x4000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CAP 0x8000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_XOR_CAP 0x10000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CHKSM_CAP 0x20000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP 0x40000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V3_CAP 0x80000UL
+ #define VNIC_QCAPS_RESP_FLAGS_L2_CQE_MODE_CAP 0x100000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP 0x200000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP 0x400000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP 0x800000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP 0x1000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_TRUSTED_VF_CAP 0x2000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_PORTCOS_MAPPING_MODE 0x4000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED 0x8000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_VNIC_RSS_HASH_MODE_CAP 0x10000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP 0x20000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP 0x40000000UL
+ __le16 max_aggs_supported;
+ u8 unused_1[5];
+ u8 valid;
+};
+
+/* hwrm_vnic_tpa_cfg_input (size:384b/48B) */
+struct hwrm_vnic_tpa_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_PACK_AS_GRO 0x100UL
+ __le32 enables;
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN 0x10UL
+ __le16 vnic_id;
+ __le16 max_agg_segs;
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 0x1UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 0x2UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 0x3UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_LAST VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX
+ __le16 max_aggs;
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 0x0UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 0x1UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 0x2UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 0x3UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 0x4UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_LAST VNIC_TPA_CFG_REQ_MAX_AGGS_MAX
+ u8 unused_0[2];
+ __le32 max_agg_timer;
+ __le32 min_agg_len;
+ __le32 tnl_tpa_en_bitmap;
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN 0x1UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE 0x2UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_NVGRE 0x4UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE 0x8UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 0x10UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6 0x20UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE 0x40UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_CUST1 0x80UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE_CUST1 0x100UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR1 0x200UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR2 0x400UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR3 0x800UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR4 0x1000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR5 0x2000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR6 0x4000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR7 0x8000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR8 0x10000UL
+ u8 unused_1[4];
+};
+
+/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */
+struct hwrm_vnic_tpa_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_tpa_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_tpa_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vnic_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_vnic_tpa_qcfg_output (size:256b/32B) */
+struct hwrm_vnic_tpa_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define VNIC_TPA_QCFG_RESP_FLAGS_TPA 0x1UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_ENCAP_TPA 0x2UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_RSC_WND_UPDATE 0x4UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO 0x8UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_ECN 0x10UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_IPID_CHECK 0x40UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_TTL_CHECK 0x80UL
+ __le16 max_agg_segs;
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_1 0x0UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_2 0x1UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_4 0x2UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_8 0x3UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX 0x1fUL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX
+ __le16 max_aggs;
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_1 0x0UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_2 0x1UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_4 0x2UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_8 0x3UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_16 0x4UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX 0x7UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX
+ __le32 max_agg_timer;
+ __le32 min_agg_len;
+ __le32 tnl_tpa_en_bitmap;
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN 0x1UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GENEVE 0x2UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_NVGRE 0x4UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GRE 0x8UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_IPV4 0x10UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_IPV6 0x20UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN_GPE 0x40UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN_CUST1 0x80UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GRE_CUST1 0x100UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR1 0x200UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR2 0x400UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR3 0x800UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR4 0x1000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR5 0x2000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR6 0x4000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR7 0x8000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR8 0x10000UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cfg_input (size:384b/48B) */
+struct hwrm_vnic_rss_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 hash_type;
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 0x80UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4 0x100UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 0x200UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6 0x400UL
+ __le16 vnic_id;
+ u8 ring_table_pair_index;
+ u8 hash_mode_flags;
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT 0x1UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_4 0x2UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_2 0x4UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL
+ __le64 ring_grp_tbl_addr;
+ __le64 hash_key_tbl_addr;
+ __le16 rss_ctx_idx;
+ u8 flags;
+ #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE 0x1UL
+ #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE 0x2UL
+ #define VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT 0x4UL
+ u8 ring_select_mode;
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ 0x0UL
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_XOR 0x1UL
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM 0x2UL
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_LAST VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM
+ u8 unused_1[4];
+};
+
+/* hwrm_vnic_rss_cfg_output (size:128b/16B) */
+struct hwrm_vnic_rss_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cfg_cmd_err (size:64b/8B) */
+struct hwrm_vnic_rss_cfg_cmd_err {
+ u8 code;
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY 0x1UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_LAST VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY
+ u8 unused_0[7];
+};
+
+/* hwrm_vnic_rss_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_rss_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 rss_ctx_idx;
+ __le16 vnic_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_vnic_rss_qcfg_output (size:512b/64B) */
+struct hwrm_vnic_rss_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 hash_type;
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV4 0x1UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV4 0x2UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV4 0x4UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6 0x8UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV6 0x10UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV6 0x20UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_AH_SPI_IPV4 0x80UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_ESP_SPI_IPV4 0x100UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_AH_SPI_IPV6 0x200UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_ESP_SPI_IPV6 0x400UL
+ u8 unused_0[4];
+ __le32 hash_key[10];
+ u8 hash_mode_flags;
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_DEFAULT 0x1UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_4 0x2UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_2 0x4UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL
+ u8 ring_select_mode;
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ 0x0UL
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_XOR 0x1UL
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ_CHECKSUM 0x2UL
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_LAST VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ_CHECKSUM
+ u8 unused_1[5];
+ u8 valid;
+};
+
+/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */
+struct hwrm_vnic_plcmodes_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_VIRTIO_PLACEMENT 0x40UL
+ __le32 enables;
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_MAX_BDS_VALID 0x8UL
+ __le32 vnic_id;
+ __le16 jumbo_thresh;
+ __le16 hds_offset;
+ __le16 hds_threshold;
+ __le16 max_bds;
+ u8 unused_0[4];
+};
+
+/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */
+struct hwrm_vnic_plcmodes_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_plcmodes_cfg_cmd_err (size:64b/8B) */
+struct hwrm_vnic_plcmodes_cfg_cmd_err {
+ u8 code;
+ #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_INVALID_HDS_THRESHOLD 0x1UL
+ #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_LAST VNIC_PLCMODES_CFG_CMD_ERR_CODE_INVALID_HDS_THRESHOLD
+ u8 unused_0[7];
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 rss_cos_lb_ctx_id;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_free_input (size:192b/24B) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 rss_cos_lb_ctx_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_alloc_input (size:704b/88B) */
+struct hwrm_ring_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
+ #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
+ #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
+ #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
+ #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
+ #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
+ #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL
+ #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL
+ #define RING_ALLOC_REQ_ENABLES_STEERING_TAG_VALID 0x800UL
+ #define RING_ALLOC_REQ_ENABLES_RX_RATE_PROFILE_VALID 0x1000UL
+ u8 ring_type;
+ #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
+ #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL
+ #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_ALLOC_REQ_RING_TYPE_RX_AGG 0x4UL
+ #define RING_ALLOC_REQ_RING_TYPE_NQ 0x5UL
+ #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_NQ
+ u8 cmpl_coal_cnt;
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_OFF 0x0UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_4 0x1UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_8 0x2UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_12 0x3UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_16 0x4UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_24 0x5UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_32 0x6UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_48 0x7UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64 0x8UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_96 0x9UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_128 0xaUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_192 0xbUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_256 0xcUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_320 0xdUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_384 0xeUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX 0xfUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_LAST RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX
+ __le16 flags;
+ #define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL
+ #define RING_ALLOC_REQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION 0x2UL
+ #define RING_ALLOC_REQ_FLAGS_NQ_DBR_PACING 0x4UL
+ #define RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE 0x8UL
+ __le64 page_tbl_addr;
+ __le32 fbo;
+ u8 page_size;
+ u8 page_tbl_depth;
+ __le16 schq_id;
+ __le32 length;
+ __le16 logical_id;
+ __le16 cmpl_ring_id;
+ __le16 queue_id;
+ __le16 rx_buf_size;
+ __le16 rx_ring_id;
+ __le16 nq_ring_id;
+ __le16 ring_arb_cfg;
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP 0x1UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ 0x2UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ
+ #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK 0xf0UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
+ __le16 steering_tag;
+ __le32 reserved3;
+ __le32 stat_ctx_id;
+ __le32 reserved4;
+ __le32 max_bw;
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0
+ #define RING_ALLOC_REQ_MAX_BW_SCALE 0x10000000UL
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST RING_ALLOC_REQ_MAX_BW_SCALE_BYTES
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 int_mode;
+ #define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL
+ #define RING_ALLOC_REQ_INT_MODE_RSVD 0x1UL
+ #define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL
+ #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
+ #define RING_ALLOC_REQ_INT_MODE_LAST RING_ALLOC_REQ_INT_MODE_POLL
+ u8 mpc_chnls_type;
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_TCE 0x0UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RCE 0x1UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_TE_CFA 0x2UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RE_CFA 0x3UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE 0x4UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_LAST RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE
+ u8 rx_rate_profile_sel;
+ #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_DEFAULT 0x0UL
+ #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_POLL_MODE 0x1UL
+ #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_LAST RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_POLL_MODE
+ u8 unused_4;
+ __le64 cq_handle;
+};
+
+/* hwrm_ring_alloc_output (size:128b/16B) */
+struct hwrm_ring_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 ring_id;
+ __le16 logical_ring_id;
+ u8 push_buffer_index;
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_LAST RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER
+ u8 unused_0[2];
+ u8 valid;
+};
+
+/* hwrm_ring_free_input (size:256b/32B) */
+struct hwrm_ring_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define RING_FREE_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_FREE_REQ_RING_TYPE_TX 0x1UL
+ #define RING_FREE_REQ_RING_TYPE_RX 0x2UL
+ #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_FREE_REQ_RING_TYPE_RX_AGG 0x4UL
+ #define RING_FREE_REQ_RING_TYPE_NQ 0x5UL
+ #define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_NQ
+ u8 flags;
+ #define RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID 0x1UL
+ #define RING_FREE_REQ_FLAGS_LAST RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID
+ __le16 ring_id;
+ __le32 prod_idx;
+ __le32 opaque;
+ __le32 unused_1;
+};
+
+/* hwrm_ring_free_output (size:128b/16B) */
+struct hwrm_ring_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_reset_input (size:192b/24B) */
+struct hwrm_ring_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
+ #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
+ #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_RESET_REQ_RING_TYPE_RX_RING_GRP 0x6UL
+ #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_RX_RING_GRP
+ u8 unused_0;
+ __le16 ring_id;
+ u8 unused_1[4];
+};
+
+/* hwrm_ring_reset_output (size:128b/16B) */
+struct hwrm_ring_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 push_buffer_index;
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_LAST RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER
+ u8 unused_0[3];
+ u8 consumer_idx[3];
+ u8 valid;
+};
+
+/* hwrm_ring_aggint_qcaps_input (size:128b/16B) */
+struct hwrm_ring_aggint_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_ring_aggint_qcaps_output (size:384b/48B) */
+struct hwrm_ring_aggint_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 cmpl_params;
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN 0x1UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MAX 0x2UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET 0x4UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE 0x8UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR 0x10UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT 0x20UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR 0x40UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT 0x80UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_AGGR_INT 0x100UL
+ __le32 nq_params;
+ #define RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN 0x1UL
+ __le16 num_cmpl_dma_aggr_min;
+ __le16 num_cmpl_dma_aggr_max;
+ __le16 num_cmpl_dma_aggr_during_int_min;
+ __le16 num_cmpl_dma_aggr_during_int_max;
+ __le16 cmpl_aggr_dma_tmr_min;
+ __le16 cmpl_aggr_dma_tmr_max;
+ __le16 cmpl_aggr_dma_tmr_during_int_min;
+ __le16 cmpl_aggr_dma_tmr_during_int_max;
+ __le16 int_lat_tmr_min_min;
+ __le16 int_lat_tmr_min_max;
+ __le16 int_lat_tmr_max_min;
+ __le16 int_lat_tmr_max_max;
+ __le16 num_cmpl_aggr_int_min;
+ __le16 num_cmpl_aggr_int_max;
+ __le16 timer_units;
+ u8 unused_0[1];
+ u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */
+struct hwrm_ring_cmpl_ring_qaggint_params_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 ring_id;
+ __le16 flags;
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_MASK 0x3UL
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_SFT 0
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL
+ u8 unused_0[4];
+};
+
+/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */
+struct hwrm_ring_cmpl_ring_qaggint_params_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 flags;
+ #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL
+ #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL
+ __le16 num_cmpl_dma_aggr;
+ __le16 num_cmpl_dma_aggr_during_int;
+ __le16 cmpl_aggr_dma_tmr;
+ __le16 cmpl_aggr_dma_tmr_during_int;
+ __le16 int_lat_tmr_min;
+ __le16 int_lat_tmr_max;
+ __le16 num_cmpl_aggr_int;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params_input (size:320b/40B) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 ring_id;
+ __le16 flags;
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL
+ __le16 num_cmpl_dma_aggr;
+ __le16 num_cmpl_dma_aggr_during_int;
+ __le16 cmpl_aggr_dma_tmr;
+ __le16 cmpl_aggr_dma_tmr_during_int;
+ __le16 int_lat_tmr_min;
+ __le16 int_lat_tmr_max;
+ __le16 num_cmpl_aggr_int;
+ __le16 enables;
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR 0x1UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT 0x2UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_CMPL_AGGR_DMA_TMR 0x4UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MIN 0x8UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX 0x10UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_AGGR_INT 0x20UL
+ u8 unused_0[4];
+};
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_grp_alloc_input (size:192b/24B) */
+struct hwrm_ring_grp_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 cr;
+ __le16 rr;
+ __le16 ar;
+ __le16 sc;
+};
+
+/* hwrm_ring_grp_alloc_output (size:128b/16B) */
+struct hwrm_ring_grp_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 ring_group_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_ring_grp_free_input (size:192b/24B) */
+struct hwrm_ring_grp_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 ring_group_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_ring_grp_free_output (size:128b/16B) */
+struct hwrm_ring_grp_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+#define DEFAULT_FLOW_ID 0xFFFFFFFFUL
+#define ROCEV1_FLOW_ID 0xFFFFFFFEUL
+#define ROCEV2_FLOW_ID 0xFFFFFFFDUL
+#define ROCEV2_CNP_FLOW_ID 0xFFFFFFFCUL
+
+/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */
+struct hwrm_cfa_l2_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_MASK 0x30UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_SFT 4
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 4)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 4)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 4)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_XDP_DISABLE 0x40UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_SOURCE_VALID 0x80UL
+ __le32 enables;
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS 0x20000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_NUM_VLANS 0x40000UL
+ u8 l2_addr[6];
+ u8 num_vlans;
+ u8 t_num_vlans;
+ u8 l2_addr_mask[6];
+ __le16 l2_ovlan;
+ __le16 l2_ovlan_mask;
+ __le16 l2_ivlan;
+ __le16 l2_ivlan_mask;
+ u8 unused_1[2];
+ u8 t_l2_addr[6];
+ u8 unused_2[2];
+ u8 t_l2_addr_mask[6];
+ __le16 t_l2_ovlan;
+ __le16 t_l2_ovlan_mask;
+ __le16 t_l2_ivlan;
+ __le16 t_l2_ivlan_mask;
+ u8 src_type;
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE 0x5UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO 0x6UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG 0x7UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG
+ u8 unused_3;
+ __le32 src_id;
+ u8 tunnel_type;
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 unused_4;
+ __le16 dst_id;
+ __le16 mirror_vnic_id;
+ u8 pri_hint;
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN
+ u8 unused_5;
+ __le32 unused_6;
+ __le64 l2_filter_id_hint;
+};
+
+/* hwrm_cfa_l2_filter_alloc_output (size:192b/24B) */
+struct hwrm_cfa_l2_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 l2_filter_id;
+ __le32 flow_id;
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_l2_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 l2_filter_id;
+};
+
+/* hwrm_cfa_l2_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_l2_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_cfg_input (size:384b/48B) */
+struct hwrm_cfa_l2_filter_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_MASK 0x30UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_SFT 4
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_NO_UPDATE (0x0UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_BYPASS_LKUP (0x1UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP (0x2UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_RESTORE_FW_OP (0x3UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_RESTORE_FW_OP
+ __le32 enables;
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_PROF_FUNC 0x4UL
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_L2_CONTEXT_ID 0x8UL
+ __le64 l2_filter_id;
+ __le32 dst_id;
+ __le32 new_mirror_vnic_id;
+ __le32 prof_func;
+ __le32 l2_context_id;
+};
+
+/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */
+struct hwrm_cfa_l2_filter_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_set_rx_mask_input (size:448b/56B) */
+struct hwrm_cfa_l2_set_rx_mask_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ __le32 mask;
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL
+ __le64 mc_tbl_addr;
+ __le32 num_mc_entries;
+ u8 unused_0[4];
+ __le64 vlan_tag_tbl_addr;
+ __le32 num_vlan_tags;
+ u8 unused_1[4];
+};
+
+/* hwrm_cfa_l2_set_rx_mask_output (size:128b/16B) */
+struct hwrm_cfa_l2_set_rx_mask_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_set_rx_mask_cmd_err (size:64b/8B) */
+struct hwrm_cfa_l2_set_rx_mask_cmd_err {
+ u8 code;
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR 0x1UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR
+ u8 unused_0[7];
+};
+
+/* hwrm_cfa_tunnel_filter_alloc_input (size:704b/88B) */
+struct hwrm_cfa_tunnel_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ __le32 enables;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL
+ __le64 l2_filter_id;
+ u8 l2_addr[6];
+ __le16 l2_ivlan;
+ __le32 l3_addr[4];
+ __le32 t_l3_addr[4];
+ u8 l3_addr_type;
+ u8 t_l3_addr_type;
+ u8 tunnel_type;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 tunnel_flags;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_EXTHDR_SEQNUM_S0 0x4UL
+ __le32 vni;
+ __le32 dst_vnic_id;
+ __le32 mirror_vnic_id;
+};
+
+/* hwrm_cfa_tunnel_filter_alloc_output (size:192b/24B) */
+struct hwrm_cfa_tunnel_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tunnel_filter_id;
+ __le32 flow_id;
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_tunnel_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_tunnel_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 tunnel_filter_id;
+};
+
+/* hwrm_cfa_tunnel_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_tunnel_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vxlan_ipv4_hdr (size:128b/16B) */
+struct hwrm_vxlan_ipv4_hdr {
+ u8 ver_hlen;
+ #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK 0xfUL
+ #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0
+ #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK 0xf0UL
+ #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4
+ u8 tos;
+ __be16 ip_id;
+ __be16 flags_frag_offset;
+ u8 ttl;
+ u8 protocol;
+ __be32 src_ip_addr;
+ __be32 dest_ip_addr;
+};
+
+/* hwrm_vxlan_ipv6_hdr (size:320b/40B) */
+struct hwrm_vxlan_ipv6_hdr {
+ __be32 ver_tc_flow_label;
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT 0x1cUL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK 0xf0000000UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT 0x14UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK 0xff00000UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT 0x0UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK 0xfffffUL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_LAST VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK
+ __be16 payload_len;
+ u8 next_hdr;
+ u8 ttl;
+ __be32 src_ip_addr[4];
+ __be32 dest_ip_addr[4];
+};
+
+/* hwrm_cfa_encap_data_vxlan (size:640b/80B) */
+struct hwrm_cfa_encap_data_vxlan {
+ u8 src_mac_addr[6];
+ __le16 unused_0;
+ u8 dst_mac_addr[6];
+ u8 num_vlan_tags;
+ u8 unused_1;
+ __be16 ovlan_tpid;
+ __be16 ovlan_tci;
+ __be16 ivlan_tpid;
+ __be16 ivlan_tci;
+ __le32 l3[10];
+ #define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK 0xfUL
+ #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 0x4UL
+ #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 0x6UL
+ #define CFA_ENCAP_DATA_VXLAN_L3_LAST CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6
+ __be16 src_port;
+ __be16 dst_port;
+ __be32 vni;
+ u8 hdr_rsvd0[3];
+ u8 hdr_rsvd1;
+ u8 hdr_flags;
+ u8 unused[3];
+};
+
+/* hwrm_cfa_encap_record_alloc_input (size:832b/104B) */
+struct hwrm_cfa_encap_record_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_EXTERNAL 0x2UL
+ u8 encap_type;
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_V4 0x9UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE_V1 0xaUL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE 0xbUL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE 0x10UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE
+ u8 unused_0[3];
+ __le32 encap_data[20];
+};
+
+/* hwrm_cfa_encap_record_alloc_output (size:128b/16B) */
+struct hwrm_cfa_encap_record_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 encap_record_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_encap_record_free_input (size:192b/24B) */
+struct hwrm_cfa_encap_record_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_record_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_cfa_encap_record_free_output (size:128b/16B) */
+struct hwrm_cfa_encap_record_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */
+struct hwrm_cfa_ntuple_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_FID 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_ARP_REPLY 0x10UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX 0x20UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_NO_L2_CONTEXT 0x40UL
+ __le32 enables;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x10UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x20UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x80UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x200UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x400UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK 0x800UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x1000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX 0x80000UL
+ __le64 l2_filter_id;
+ u8 src_macaddr[6];
+ __be16 ethertype;
+ u8 ip_addr_type;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
+ u8 ip_protocol;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_ICMP 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_ICMPV6 0x3aUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_RSVD 0xffUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_RSVD
+ __le16 dst_id;
+ __le16 rfs_ring_tbl_idx;
+ u8 tunnel_type;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 pri_hint;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST 0x3UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST
+ __be32 src_ipaddr[4];
+ __be32 src_ipaddr_mask[4];
+ __be32 dst_ipaddr[4];
+ __be32 dst_ipaddr_mask[4];
+ __be16 src_port;
+ __be16 src_port_mask;
+ __be16 dst_port;
+ __be16 dst_port_mask;
+ __le64 ntuple_filter_id_hint;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */
+struct hwrm_cfa_ntuple_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 ntuple_filter_id;
+ __le32 flow_id;
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc_cmd_err (size:64b/8B) */
+struct hwrm_cfa_ntuple_filter_alloc_cmd_err {
+ u8 code;
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR
+ u8 unused_0[7];
+};
+
+/* hwrm_cfa_ntuple_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_ntuple_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 ntuple_filter_id;
+};
+
+/* hwrm_cfa_ntuple_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_ntuple_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_cfg_input (size:384b/48B) */
+struct hwrm_cfa_ntuple_filter_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
+ __le32 flags;
+ #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_RFS_RING_IDX 0x2UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_NO_L2_CONTEXT 0x4UL
+ __le64 ntuple_filter_id;
+ __le32 new_dst_id;
+ __le32 new_mirror_vnic_id;
+ __le16 new_meter_instance_id;
+ #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_LAST CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID
+ u8 unused_1[6];
+};
+
+/* hwrm_cfa_ntuple_filter_cfg_output (size:128b/16B) */
+struct hwrm_cfa_ntuple_filter_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_decap_filter_alloc_input (size:832b/104B) */
+struct hwrm_cfa_decap_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL 0x1UL
+ __le32 enables;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x1UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID 0x2UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x4UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x8UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID 0x10UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID 0x20UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID 0x40UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID 0x80UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x100UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x200UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x400UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x800UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x1000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x2000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x4000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
+ __be32 tunnel_id;
+ u8 tunnel_type;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 unused_0;
+ __le16 unused_1;
+ u8 src_macaddr[6];
+ u8 unused_2[2];
+ u8 dst_macaddr[6];
+ __be16 ovlan_vid;
+ __be16 ivlan_vid;
+ __be16 t_ovlan_vid;
+ __be16 t_ivlan_vid;
+ __be16 ethertype;
+ u8 ip_addr_type;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
+ u8 ip_protocol;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP
+ __le16 unused_3;
+ __le32 unused_4;
+ __be32 src_ipaddr[4];
+ __be32 dst_ipaddr[4];
+ __be16 src_port;
+ __be16 dst_port;
+ __le16 dst_id;
+ __le16 l2_ctxt_ref_id;
+};
+
+/* hwrm_cfa_decap_filter_alloc_output (size:128b/16B) */
+struct hwrm_cfa_decap_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 decap_filter_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_decap_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_decap_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 decap_filter_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_cfa_decap_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_decap_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_flow_alloc_input (size:1024b/128B) */
+struct hwrm_cfa_flow_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 flags;
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_TX 0x40UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_RX 0x80UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_MATCH_VXLAN_IP_VNI 0x100UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_VHOST_ID_USE_VLAN 0x200UL
+ __le16 src_fid;
+ __le32 tunnel_handle;
+ __le16 action_flags;
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FLOW_AGING_ENABLED 0x800UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_PRI_HINT 0x1000UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NO_FLOW_COUNTER_ALLOC 0x2000UL
+ __le16 dst_fid;
+ __be16 l2_rewrite_vlan_tpid;
+ __be16 l2_rewrite_vlan_tci;
+ __le16 act_meter_id;
+ __le16 ref_flow_handle;
+ __be16 ethertype;
+ __be16 outer_vlan_tci;
+ __be16 dmac[3];
+ __be16 inner_vlan_tci;
+ __be16 smac[3];
+ u8 ip_dst_mask_len;
+ u8 ip_src_mask_len;
+ __be32 ip_dst[4];
+ __be32 ip_src[4];
+ __be16 l4_src_port;
+ __be16 l4_src_port_mask;
+ __be16 l4_dst_port;
+ __be16 l4_dst_port_mask;
+ __be32 nat_ip_address[4];
+ __be16 l2_rewrite_dmac[3];
+ __be16 nat_port;
+ __be16 l2_rewrite_smac[3];
+ u8 ip_proto;
+ u8 tunnel_type;
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+};
+
+/* hwrm_cfa_flow_alloc_output (size:256b/32B) */
+struct hwrm_cfa_flow_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 flow_handle;
+ u8 unused_0[2];
+ __le32 flow_id;
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_EXT
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_TX
+ __le64 ext_flow_handle;
+ __le32 flow_counter_id;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_flow_alloc_cmd_err (size:64b/8B) */
+struct hwrm_cfa_flow_alloc_cmd_err {
+ u8 code;
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_L2_CONTEXT_TCAM 0x1UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_ACTION_RECORD 0x2UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_COUNTER 0x3UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_WILD_CARD_TCAM 0x4UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_HASH_COLLISION 0x5UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_KEY_EXISTS 0x6UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB 0x7UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_LAST CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB
+ u8 unused_0[7];
+};
+
+/* hwrm_cfa_flow_free_input (size:256b/32B) */
+struct hwrm_cfa_flow_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 flow_handle;
+ __le16 unused_0;
+ __le32 flow_counter_id;
+ __le64 ext_flow_handle;
+};
+
+/* hwrm_cfa_flow_free_output (size:256b/32B) */
+struct hwrm_cfa_flow_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 packet;
+ __le64 byte;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_flow_info_input (size:256b/32B) */
+struct hwrm_cfa_flow_info_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 flow_handle;
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK 0xfffUL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT 0x1000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT 0x2000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_NIC_TX 0x3000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT 0x4000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX 0x8000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT_RX 0x9000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT_RX 0xa000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_NIC_RX 0xb000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT_RX 0xc000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_LAST CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT_RX
+ u8 unused_0[6];
+ __le64 ext_flow_handle;
+};
+
+/* hwrm_cfa_flow_info_output (size:5632b/704B) */
+struct hwrm_cfa_flow_info_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define CFA_FLOW_INFO_RESP_FLAGS_PATH_TX 0x1UL
+ #define CFA_FLOW_INFO_RESP_FLAGS_PATH_RX 0x2UL
+ u8 profile;
+ __le16 src_fid;
+ __le16 dst_fid;
+ __le16 l2_ctxt_id;
+ __le64 em_info;
+ __le64 tcam_info;
+ __le64 vfp_tcam_info;
+ __le16 ar_id;
+ __le16 flow_handle;
+ __le32 tunnel_handle;
+ __le16 flow_timer;
+ u8 unused_0[6];
+ __le32 flow_key_data[130];
+ __le32 flow_action_info[30];
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_flow_stats_input (size:640b/80B) */
+struct hwrm_cfa_flow_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 num_flows;
+ __le16 flow_handle_0;
+ __le16 flow_handle_1;
+ __le16 flow_handle_2;
+ __le16 flow_handle_3;
+ __le16 flow_handle_4;
+ __le16 flow_handle_5;
+ __le16 flow_handle_6;
+ __le16 flow_handle_7;
+ __le16 flow_handle_8;
+ __le16 flow_handle_9;
+ u8 unused_0[2];
+ __le32 flow_id_0;
+ __le32 flow_id_1;
+ __le32 flow_id_2;
+ __le32 flow_id_3;
+ __le32 flow_id_4;
+ __le32 flow_id_5;
+ __le32 flow_id_6;
+ __le32 flow_id_7;
+ __le32 flow_id_8;
+ __le32 flow_id_9;
+};
+
+/* hwrm_cfa_flow_stats_output (size:1408b/176B) */
+struct hwrm_cfa_flow_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 packet_0;
+ __le64 packet_1;
+ __le64 packet_2;
+ __le64 packet_3;
+ __le64 packet_4;
+ __le64 packet_5;
+ __le64 packet_6;
+ __le64 packet_7;
+ __le64 packet_8;
+ __le64 packet_9;
+ __le64 byte_0;
+ __le64 byte_1;
+ __le64 byte_2;
+ __le64 byte_3;
+ __le64 byte_4;
+ __le64 byte_5;
+ __le64 byte_6;
+ __le64 byte_7;
+ __le64 byte_8;
+ __le64 byte_9;
+ __le16 flow_hits;
+ u8 unused_0[5];
+ u8 valid;
+};
+
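/*
 * Illustrative sketch (not part of the header): HWRM_CFA_FLOW_STATS returns
 * one packet_N/byte_N counter pair for each flow_handle_N / flow_id_N given
 * in the request, up to ten flows per call. Only the first pair is copied
 * here; the remaining nine follow the same pattern. The helper name and the
 * caller-provided arrays are assumptions made for the example.
 */
static void example_copy_first_flow_stat(const struct hwrm_cfa_flow_stats_output *resp,
					 u64 *packets, u64 *bytes)
{
	packets[0] = le64_to_cpu(resp->packet_0);
	bytes[0] = le64_to_cpu(resp->byte_0);
	/* ... packet_1/byte_1 through packet_9/byte_9 are handled likewise ... */
}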
+/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */
+struct hwrm_cfa_vfr_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ __le16 reserved;
+ u8 unused_0[4];
+ char vfr_name[32];
+};
+
+/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */
+struct hwrm_cfa_vfr_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 rx_cfa_code;
+ __le16 tx_cfa_action;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_vfr_free_input (size:448b/56B) */
+struct hwrm_cfa_vfr_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ char vfr_name[32];
+ __le16 vf_id;
+ __le16 reserved;
+ u8 unused_0[4];
+};
+
+/* hwrm_cfa_vfr_free_output (size:128b/16B) */
+struct hwrm_cfa_vfr_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_eem_qcaps_input (size:192b/24B) */
+struct hwrm_cfa_eem_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_EEM_QCAPS_REQ_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_QCAPS_REQ_FLAGS_PATH_RX 0x2UL
+ #define CFA_EEM_QCAPS_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL
+ __le32 unused_0;
+};
+
+/* hwrm_cfa_eem_qcaps_output (size:320b/40B) */
+struct hwrm_cfa_eem_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_RX 0x2UL
+ #define CFA_EEM_QCAPS_RESP_FLAGS_CENTRALIZED_MEMORY_MODEL_SUPPORTED 0x4UL
+ #define CFA_EEM_QCAPS_RESP_FLAGS_DETACHED_CENTRALIZED_MEMORY_MODEL_SUPPORTED 0x8UL
+ __le32 unused_0;
+ __le32 supported;
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY0_TABLE 0x1UL
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY1_TABLE 0x2UL
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_RECORD_TABLE 0x4UL
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_FLOW_COUNTERS_TABLE 0x8UL
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_FID_TABLE 0x10UL
+ __le32 max_entries_supported;
+ __le16 key_entry_size;
+ __le16 record_entry_size;
+ __le16 efc_entry_size;
+ __le16 fid_entry_size;
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_eem_cfg_input (size:384b/48B) */
+struct hwrm_cfa_eem_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_EEM_CFG_REQ_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_CFG_REQ_FLAGS_PATH_RX 0x2UL
+ #define CFA_EEM_CFG_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL
+ #define CFA_EEM_CFG_REQ_FLAGS_SECONDARY_PF 0x8UL
+ __le16 group_id;
+ __le16 unused_0;
+ __le32 num_entries;
+ __le32 unused_1;
+ __le16 key0_ctx_id;
+ __le16 key1_ctx_id;
+ __le16 record_ctx_id;
+ __le16 efc_ctx_id;
+ __le16 fid_ctx_id;
+ __le16 unused_2;
+ __le32 unused_3;
+};
+
+/* hwrm_cfa_eem_cfg_output (size:128b/16B) */
+struct hwrm_cfa_eem_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_eem_qcfg_input (size:192b/24B) */
+struct hwrm_cfa_eem_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_EEM_QCFG_REQ_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_QCFG_REQ_FLAGS_PATH_RX 0x2UL
+ __le32 unused_0;
+};
+
+/* hwrm_cfa_eem_qcfg_output (size:256b/32B) */
+struct hwrm_cfa_eem_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define CFA_EEM_QCFG_RESP_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_QCFG_RESP_FLAGS_PATH_RX 0x2UL
+ #define CFA_EEM_QCFG_RESP_FLAGS_PREFERRED_OFFLOAD 0x4UL
+ __le32 num_entries;
+ __le16 key0_ctx_id;
+ __le16 key1_ctx_id;
+ __le16 record_ctx_id;
+ __le16 efc_ctx_id;
+ __le16 fid_ctx_id;
+ u8 unused_2[5];
+ u8 valid;
+};
+
+/* hwrm_cfa_eem_op_input (size:192b/24B) */
+struct hwrm_cfa_eem_op_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_EEM_OP_REQ_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_OP_REQ_FLAGS_PATH_RX 0x2UL
+ __le16 unused_0;
+ __le16 op;
+ #define CFA_EEM_OP_REQ_OP_RESERVED 0x0UL
+ #define CFA_EEM_OP_REQ_OP_EEM_DISABLE 0x1UL
+ #define CFA_EEM_OP_REQ_OP_EEM_ENABLE 0x2UL
+ #define CFA_EEM_OP_REQ_OP_EEM_CLEANUP 0x3UL
+ #define CFA_EEM_OP_REQ_OP_LAST CFA_EEM_OP_REQ_OP_EEM_CLEANUP
+};
+
+/* hwrm_cfa_eem_op_output (size:128b/16B) */
+struct hwrm_cfa_eem_op_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_adv_flow_mgnt_qcaps_input (size:256b/32B) */
+struct hwrm_cfa_adv_flow_mgnt_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 unused_0[4];
+};
+
+/* hwrm_cfa_adv_flow_mgnt_qcaps_output (size:128b/16B) */
+struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ARP_SUPPORTED 0x1000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED 0x2000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED 0x4000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TRUFLOW_CAPABLE 0x8000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_FILTER_TRAFFIC_TYPE_L2_ROCE_SUPPORTED 0x10000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_LAG_SUPPORTED 0x20000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_NO_L2CTX_SUPPORTED 0x40000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NIC_FLOW_STATS_SUPPORTED 0x80000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED 0x100000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED 0x200000UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
+ u8 tunnel_next_proto;
+ u8 unused_0[6];
+};
+
+/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tunnel_dst_port_id;
+ __be16 tunnel_dst_port_val;
+ u8 upar_in_use;
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR0 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR1 0x2UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR2 0x4UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR3 0x8UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR4 0x10UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR5 0x20UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR6 0x40UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR7 0x80UL
+ u8 status;
+ #define TUNNEL_DST_PORT_QUERY_RESP_STATUS_CHIP_LEVEL 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_STATUS_FUNC_LEVEL 0x2UL
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_alloc_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
+ u8 tunnel_next_proto;
+ __be16 tunnel_dst_port_val;
+ u8 unused_0[4];
+};
+
+/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tunnel_dst_port_id;
+ u8 error_info;
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_SUCCESS 0x0UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ALLOCATED 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_NO_RESOURCE 0x2UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ENABLED 0x3UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ENABLED
+ u8 upar_in_use;
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR0 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR1 0x2UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR2 0x4UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR3 0x8UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR4 0x10UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR5 0x20UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR6 0x40UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR7 0x80UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
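/*
 * Illustrative sketch (not part of the header): filling a
 * HWRM_TUNNEL_DST_PORT_ALLOC request for a VXLAN UDP destination port.
 * Note that tunnel_dst_port_val travels big-endian (__be16), unlike most
 * HWRM fields. How the request is queued and sent to firmware is
 * driver-specific and deliberately not shown; only the field encoding
 * defined above is illustrated, and the function name is hypothetical.
 */
static void example_fill_tunnel_alloc(struct hwrm_tunnel_dst_port_alloc_input *req,
				      u16 udp_port)
{
	req->tunnel_type = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
	req->tunnel_dst_port_val = cpu_to_be16(udp_port);	/* e.g. 4789 for VXLAN */
}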
+/* hwrm_tunnel_dst_port_free_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
+ u8 tunnel_next_proto;
+ __le16 tunnel_dst_port_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 error_info;
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_SUCCESS 0x0UL
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_OWNER 0x1UL
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_ALLOCATED 0x2UL
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_ALLOCATED
+ u8 unused_1[6];
+ u8 valid;
+};
+
+/* ctx_hw_stats (size:1280b/160B) */
+struct ctx_hw_stats {
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 tpa_pkts;
+ __le64 tpa_bytes;
+ __le64 tpa_events;
+ __le64 tpa_aborts;
+};
+
+/* ctx_hw_stats_ext (size:1408b/176B) */
+struct ctx_hw_stats_ext {
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+ __le64 rx_tpa_events;
+};
+
+/* hwrm_stat_ctx_alloc_input (size:384b/48B) */
+struct hwrm_stat_ctx_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 stats_dma_addr;
+ __le32 update_period_ms;
+ u8 stat_ctx_flags;
+ #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
+ #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_DUP_HOST_BUF 0x2UL
+ u8 unused_0;
+ __le16 stats_dma_length;
+ __le16 flags;
+ #define STAT_CTX_ALLOC_REQ_FLAGS_STEERING_TAG_VALID 0x1UL
+ __le16 steering_tag;
+ __le32 stat_ctx_id;
+ __le16 alloc_seq_id;
+ u8 unused_1[6];
+};
+
+/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
+struct hwrm_stat_ctx_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 stat_ctx_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
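/*
 * Illustrative sketch (not part of the header): a statistics context points
 * firmware at a host DMA buffer that it refreshes every update_period_ms;
 * the buffer layout corresponds to struct ctx_hw_stats / ctx_hw_stats_ext
 * above. The device pointer, out-parameters, and chosen refresh period are
 * assumptions for the example; error handling beyond the allocation check
 * is omitted.
 */
static int example_prepare_stat_ctx(struct device *dev,
				    struct hwrm_stat_ctx_alloc_input *req,
				    void **stats, dma_addr_t *mapping)
{
	*stats = dma_alloc_coherent(dev, sizeof(struct ctx_hw_stats), mapping,
				    GFP_KERNEL);
	if (!*stats)
		return -ENOMEM;

	req->stats_dma_addr = cpu_to_le64(*mapping);
	req->stats_dma_length = cpu_to_le16(sizeof(struct ctx_hw_stats));
	req->update_period_ms = cpu_to_le32(1000);	/* refresh once per second */
	return 0;
}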
+/* hwrm_stat_ctx_free_input (size:192b/24B) */
+struct hwrm_stat_ctx_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_stat_ctx_free_output (size:128b/16B) */
+struct hwrm_stat_ctx_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 stat_ctx_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_query_input (size:192b/24B) */
+struct hwrm_stat_ctx_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 flags;
+ #define STAT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[3];
+};
+
+/* hwrm_stat_ctx_query_output (size:1408b/176B) */
+struct hwrm_stat_ctx_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 rx_agg_pkts;
+ __le64 rx_agg_bytes;
+ __le64 rx_agg_events;
+ __le64 rx_agg_aborts;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_stat_ext_ctx_query_input (size:192b/24B) */
+struct hwrm_stat_ext_ctx_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 flags;
+ #define STAT_EXT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[3];
+};
+
+/* hwrm_stat_ext_ctx_query_output (size:1536b/192B) */
+struct hwrm_stat_ext_ctx_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+ __le64 rx_tpa_events;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */
+struct hwrm_stat_ctx_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */
+struct hwrm_stat_ctx_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_pcie_qstats_input (size:256b/32B) */
+struct hwrm_pcie_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 pcie_stat_size;
+ u8 unused_0[6];
+ __le64 pcie_stat_host_addr;
+};
+
+/* hwrm_pcie_qstats_output (size:128b/16B) */
+struct hwrm_pcie_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 pcie_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* pcie_ctx_hw_stats (size:768b/96B) */
+struct pcie_ctx_hw_stats {
+ __le64 pcie_pl_signal_integrity;
+ __le64 pcie_dl_signal_integrity;
+ __le64 pcie_tl_signal_integrity;
+ __le64 pcie_link_integrity;
+ __le64 pcie_tx_traffic_rate;
+ __le64 pcie_rx_traffic_rate;
+ __le64 pcie_tx_dllp_statistics;
+ __le64 pcie_rx_dllp_statistics;
+ __le64 pcie_equalization_time;
+ __le32 pcie_ltssm_histogram[4];
+ __le64 pcie_recovery_histogram;
+};
+
+/* pcie_ctx_hw_stats_v2 (size:4096b/512B) */
+struct pcie_ctx_hw_stats_v2 {
+ __le64 pcie_pl_signal_integrity;
+ __le64 pcie_dl_signal_integrity;
+ __le64 pcie_tl_signal_integrity;
+ __le64 pcie_link_integrity;
+ __le64 pcie_tx_traffic_rate;
+ __le64 pcie_rx_traffic_rate;
+ __le64 pcie_tx_dllp_statistics;
+ __le64 pcie_rx_dllp_statistics;
+ __le64 pcie_equalization_time;
+ __le32 pcie_ltssm_histogram[4];
+ __le64 pcie_recovery_histogram;
+ __le32 pcie_tl_credit_nph_histogram[8];
+ __le32 pcie_tl_credit_ph_histogram[8];
+ __le32 pcie_tl_credit_pd_histogram[8];
+ __le32 pcie_cmpl_latest_times[4];
+ __le32 pcie_cmpl_longest_time;
+ __le32 pcie_cmpl_shortest_time;
+ __le32 unused_0[2];
+ __le32 pcie_cmpl_latest_headers[4][4];
+ __le32 pcie_cmpl_longest_headers[4][4];
+ __le32 pcie_cmpl_shortest_headers[4][4];
+ __le32 pcie_wr_latency_histogram[12];
+ __le32 pcie_wr_latency_all_normal_count;
+ __le32 unused_1;
+ __le64 pcie_posted_packet_count;
+ __le64 pcie_non_posted_packet_count;
+ __le64 pcie_other_packet_count;
+ __le64 pcie_blocked_packet_count;
+ __le64 pcie_cmpl_packet_count;
+};
+
+/* hwrm_stat_generic_qstats_input (size:256b/32B) */
+struct hwrm_stat_generic_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 generic_stat_size;
+ u8 flags;
+ #define STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[5];
+ __le64 generic_stat_host_addr;
+};
+
+/* hwrm_stat_generic_qstats_output (size:128b/16B) */
+struct hwrm_stat_generic_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 generic_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* generic_sw_hw_stats (size:1472b/184B) */
+struct generic_sw_hw_stats {
+ __le64 pcie_statistics_tx_tlp;
+ __le64 pcie_statistics_rx_tlp;
+ __le64 pcie_credit_fc_hdr_posted;
+ __le64 pcie_credit_fc_hdr_nonposted;
+ __le64 pcie_credit_fc_hdr_cmpl;
+ __le64 pcie_credit_fc_data_posted;
+ __le64 pcie_credit_fc_data_nonposted;
+ __le64 pcie_credit_fc_data_cmpl;
+ __le64 pcie_credit_fc_tgt_nonposted;
+ __le64 pcie_credit_fc_tgt_data_posted;
+ __le64 pcie_credit_fc_tgt_hdr_posted;
+ __le64 pcie_credit_fc_cmpl_hdr_posted;
+ __le64 pcie_credit_fc_cmpl_data_posted;
+ __le64 pcie_cmpl_longest;
+ __le64 pcie_cmpl_shortest;
+ __le64 cache_miss_count_cfcq;
+ __le64 cache_miss_count_cfcs;
+ __le64 cache_miss_count_cfcc;
+ __le64 cache_miss_count_cfcm;
+ __le64 hw_db_recov_dbs_dropped;
+ __le64 hw_db_recov_drops_serviced;
+ __le64 hw_db_recov_dbs_recovered;
+ __le64 hw_db_recov_oo_drop_count;
+};
+
+/* hwrm_fw_reset_input (size:192b/24B) */
+struct hwrm_fw_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT 0x7UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_IMPACTLESS_ACTIVATION 0x8UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_IMPACTLESS_ACTIVATION
+ u8 selfrst_status;
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
+ #define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE
+ u8 host_idx;
+ u8 flags;
+ #define FW_RESET_REQ_FLAGS_RESET_GRACEFUL 0x1UL
+ #define FW_RESET_REQ_FLAGS_FW_ACTIVATION 0x2UL
+ u8 unused_0[4];
+};
+
+/* hwrm_fw_reset_output (size:128b/16B) */
+struct hwrm_fw_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
+ #define FW_RESET_RESP_SELFRST_STATUS_LAST FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_fw_qstatus_input (size:192b/24B) */
+struct hwrm_fw_qstatus_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_LAST FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_qstatus_output (size:128b/16B) */
+struct hwrm_fw_qstatus_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER 0x3UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER
+ u8 nvm_option_action_status;
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_NONE 0x0UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_HOTRESET 0x1UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_WARMBOOT 0x2UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT 0x3UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_LAST FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_fw_set_time_input (size:256b/32B) */
+struct hwrm_fw_set_time_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 year;
+ #define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL
+ #define FW_SET_TIME_REQ_YEAR_LAST FW_SET_TIME_REQ_YEAR_UNKNOWN
+ u8 month;
+ u8 day;
+ u8 hour;
+ u8 minute;
+ u8 second;
+ u8 unused_0;
+ __le16 millisecond;
+ __le16 zone;
+ #define FW_SET_TIME_REQ_ZONE_UTC 0
+ #define FW_SET_TIME_REQ_ZONE_UNKNOWN 65535
+ #define FW_SET_TIME_REQ_ZONE_LAST FW_SET_TIME_REQ_ZONE_UNKNOWN
+ u8 unused_1[4];
+};
+
+/* hwrm_fw_set_time_output (size:128b/16B) */
+struct hwrm_fw_set_time_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
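/*
 * Illustrative sketch (not part of the header): populating a
 * HWRM_FW_SET_TIME request from a UTC time64_t using the generic kernel
 * helper time64_to_tm(). Zone 0 is UTC per FW_SET_TIME_REQ_ZONE_UTC above.
 * The function name is hypothetical and the request transport is omitted.
 */
static void example_fill_fw_set_time(struct hwrm_fw_set_time_input *req, time64_t now)
{
	struct tm tm;

	time64_to_tm(now, 0, &tm);
	req->year = cpu_to_le16(tm.tm_year + 1900);	/* tm_year counts from 1900 */
	req->month = tm.tm_mon + 1;			/* tm_mon is 0-based */
	req->day = tm.tm_mday;
	req->hour = tm.tm_hour;
	req->minute = tm.tm_min;
	req->second = tm.tm_sec;
	req->millisecond = 0;
	req->zone = cpu_to_le16(FW_SET_TIME_REQ_ZONE_UTC);
}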
+/* hwrm_struct_hdr (size:128b/16B) */
+struct hwrm_struct_hdr {
+ __le16 struct_id;
+ #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
+ #define STRUCT_HDR_STRUCT_ID_POWER_BKUP 0x427UL
+ #define STRUCT_HDR_STRUCT_ID_PEER_MMAP 0x429UL
+ #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
+ #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
+ #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
+ #define STRUCT_HDR_STRUCT_ID_MSIX_PER_VF 0xc8UL
+ #define STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_COUNT 0x12cUL
+ #define STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_BOUND 0x12dUL
+ #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_BOUND
+ __le16 len;
+ u8 version;
+ #define STRUCT_HDR_VERSION_0 0x0UL
+ #define STRUCT_HDR_VERSION_1 0x1UL
+ #define STRUCT_HDR_VERSION_LAST STRUCT_HDR_VERSION_1
+ u8 count;
+ __le16 subtype;
+ __le16 next_offset;
+ #define STRUCT_HDR_NEXT_OFFSET_LAST 0x0UL
+ u8 unused_0[6];
+};
+
+/* hwrm_struct_data_dcbx_app (size:64b/8B) */
+struct hwrm_struct_data_dcbx_app {
+ __be16 protocol_id;
+ u8 protocol_selector;
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_LAST STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT
+ u8 priority;
+ u8 valid;
+ u8 unused_0[3];
+};
+
+/* hwrm_fw_set_structured_data_input (size:256b/32B) */
+struct hwrm_fw_set_structured_data_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le16 data_len;
+ u8 hdr_cnt;
+ u8 unused_0[5];
+};
+
+/* hwrm_fw_set_structured_data_output (size:128b/16B) */
+struct hwrm_fw_set_structured_data_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fw_set_structured_data_cmd_err (size:64b/8B) */
+struct hwrm_fw_set_structured_data_cmd_err {
+ u8 code;
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_get_structured_data_input (size:256b/32B) */
+struct hwrm_fw_get_structured_data_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ __le16 data_len;
+ __le16 structure_id;
+ __le16 subtype;
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_UNUSED 0x0UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_ALL 0xffffUL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN 0x100UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_PEER 0x101UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_OPERATIONAL 0x102UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER 0x201UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_LAST FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL
+ u8 count;
+ u8 unused_0;
+};
+
+/* hwrm_fw_get_structured_data_output (size:128b/16B) */
+struct hwrm_fw_get_structured_data_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 hdr_cnt;
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_fw_get_structured_data_cmd_err (size:64b/8B) */
+struct hwrm_fw_get_structured_data_cmd_err {
+ u8 code;
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_livepatch_query_input (size:192b/24B) */
+struct hwrm_fw_livepatch_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 fw_target;
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_COMMON_FW 0x1UL
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW 0x2UL
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_LAST FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_livepatch_query_output (size:640b/80B) */
+struct hwrm_fw_livepatch_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ char install_ver[32];
+ char active_ver[32];
+ __le16 status_flags;
+ #define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL 0x1UL
+ #define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE 0x2UL
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_fw_livepatch_input (size:256b/32B) */
+struct hwrm_fw_livepatch_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 opcode;
+ #define FW_LIVEPATCH_REQ_OPCODE_ACTIVATE 0x1UL
+ #define FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE 0x2UL
+ #define FW_LIVEPATCH_REQ_OPCODE_LAST FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE
+ u8 fw_target;
+ #define FW_LIVEPATCH_REQ_FW_TARGET_COMMON_FW 0x1UL
+ #define FW_LIVEPATCH_REQ_FW_TARGET_SECURE_FW 0x2UL
+ #define FW_LIVEPATCH_REQ_FW_TARGET_LAST FW_LIVEPATCH_REQ_FW_TARGET_SECURE_FW
+ u8 loadtype;
+ #define FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL 0x1UL
+ #define FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT 0x2UL
+ #define FW_LIVEPATCH_REQ_LOADTYPE_LAST FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT
+ u8 flags;
+ __le32 patch_len;
+ __le64 host_addr;
+};
+
+/* hwrm_fw_livepatch_output (size:128b/16B) */
+struct hwrm_fw_livepatch_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fw_livepatch_cmd_err (size:64b/8B) */
+struct hwrm_fw_livepatch_cmd_err {
+ u8 code;
+ #define FW_LIVEPATCH_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_OPCODE 0x1UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_TARGET 0x2UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_SUPPORTED 0x3UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_INSTALLED 0x4UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_PATCHED 0x5UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_AUTH_FAIL 0x6UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_HEADER 0x7UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_SIZE 0x8UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED 0x9UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_LAST FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED
+ u8 unused_0[7];
+};
+
+/* hwrm_exec_fwd_resp_input (size:1024b/128B) */
+struct hwrm_exec_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_request[26];
+ __le16 encap_resp_target_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_exec_fwd_resp_output (size:128b/16B) */
+struct hwrm_exec_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_reject_fwd_resp_input (size:1024b/128B) */
+struct hwrm_reject_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_request[26];
+ __le16 encap_resp_target_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_reject_fwd_resp_output (size:128b/16B) */
+struct hwrm_reject_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fwd_resp_input (size:1024b/128B) */
+struct hwrm_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 encap_resp_target_id;
+ __le16 encap_resp_cmpl_ring;
+ __le16 encap_resp_len;
+ u8 unused_0;
+ u8 unused_1;
+ __le64 encap_resp_addr;
+ __le32 encap_resp[24];
+};
+
+/* hwrm_fwd_resp_output (size:128b/16B) */
+struct hwrm_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fwd_async_event_cmpl_input (size:320b/40B) */
+struct hwrm_fwd_async_event_cmpl_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 encap_async_event_target_id;
+ u8 unused_0[6];
+ __le32 encap_async_event_cmpl[4];
+};
+
+/* hwrm_fwd_async_event_cmpl_output (size:128b/16B) */
+struct hwrm_fwd_async_event_cmpl_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_temp_monitor_query_input (size:128b/16B) */
+struct hwrm_temp_monitor_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_temp_monitor_query_output (size:192b/24B) */
+struct hwrm_temp_monitor_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 temp;
+ u8 phy_temp;
+ u8 om_temp;
+ u8 flags;
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE 0x1UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_PHY_TEMP_NOT_AVAILABLE 0x2UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_NOT_PRESENT 0x4UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_TEMP_NOT_AVAILABLE 0x8UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_EXT_TEMP_FIELDS_AVAILABLE 0x10UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_THRESHOLD_VALUES_AVAILABLE 0x20UL
+ u8 temp2;
+ u8 phy_temp2;
+ u8 om_temp2;
+ u8 warn_threshold;
+ u8 critical_threshold;
+ u8 fatal_threshold;
+ u8 shutdown_threshold;
+ u8 unused_0[4];
+ u8 valid;
+};
+
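/*
 * Illustrative sketch (not part of the header): the temperature fields in
 * hwrm_temp_monitor_query_output are only meaningful when the corresponding
 * *_NOT_AVAILABLE flag is clear, and the temp2/threshold fields are gated by
 * EXT_TEMP_FIELDS_AVAILABLE / THRESHOLD_VALUES_AVAILABLE. Degrees Celsius is
 * the usual convention for these sensors but is not stated in this header;
 * the function name is hypothetical.
 */
static void example_report_temp(const struct hwrm_temp_monitor_query_output *resp)
{
	if (!(resp->flags & TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE))
		pr_info("chip temperature: %u\n", resp->temp);
	if (resp->flags & TEMP_MONITOR_QUERY_RESP_FLAGS_THRESHOLD_VALUES_AVAILABLE)
		pr_info("warn/critical thresholds: %u/%u\n",
			resp->warn_threshold, resp->critical_threshold);
}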
+/* hwrm_wol_filter_alloc_input (size:512b/64B) */
+struct hwrm_wol_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS 0x1UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET 0x2UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_SIZE 0x4UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_ADDR 0x8UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_ADDR 0x10UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_SIZE 0x20UL
+ __le16 port_id;
+ u8 wol_type;
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT 0x0UL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP 0x1UL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID 0xffUL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_LAST WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID
+ u8 unused_0[5];
+ u8 mac_address[6];
+ __le16 pattern_offset;
+ __le16 pattern_buf_size;
+ __le16 pattern_mask_size;
+ u8 unused_1[4];
+ __le64 pattern_buf_addr;
+ __le64 pattern_mask_addr;
+};
+
+/* hwrm_wol_filter_alloc_output (size:128b/16B) */
+struct hwrm_wol_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 wol_filter_id;
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_wol_filter_free_input (size:256b/32B) */
+struct hwrm_wol_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define WOL_FILTER_FREE_REQ_FLAGS_FREE_ALL_WOL_FILTERS 0x1UL
+ __le32 enables;
+ #define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID 0x1UL
+ __le16 port_id;
+ u8 wol_filter_id;
+ u8 unused_0[5];
+};
+
+/* hwrm_wol_filter_free_output (size:128b/16B) */
+struct hwrm_wol_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_wol_filter_qcfg_input (size:448b/56B) */
+struct hwrm_wol_filter_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 handle;
+ u8 unused_0[4];
+ __le64 pattern_buf_addr;
+ __le16 pattern_buf_size;
+ u8 unused_1[6];
+ __le64 pattern_mask_addr;
+ __le16 pattern_mask_size;
+ u8 unused_2[6];
+};
+
+/* hwrm_wol_filter_qcfg_output (size:256b/32B) */
+struct hwrm_wol_filter_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 next_handle;
+ u8 wol_filter_id;
+ u8 wol_type;
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT 0x0UL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP 0x1UL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID 0xffUL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_LAST WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID
+ __le32 unused_0;
+ u8 mac_address[6];
+ __le16 pattern_offset;
+ __le16 pattern_size;
+ __le16 pattern_mask_size;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_wol_reason_qcfg_input (size:320b/40B) */
+struct hwrm_wol_reason_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+ __le64 wol_pkt_buf_addr;
+ __le16 wol_pkt_buf_size;
+ u8 unused_1[6];
+};
+
+/* hwrm_wol_reason_qcfg_output (size:128b/16B) */
+struct hwrm_wol_reason_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 wol_filter_id;
+ u8 wol_reason;
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT 0x0UL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_BMP 0x1UL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID 0xffUL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_LAST WOL_REASON_QCFG_RESP_WOL_REASON_INVALID
+ u8 wol_pkt_len;
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* hwrm_dbg_read_direct_input (size:256b/32B) */
+struct hwrm_dbg_read_direct_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 read_addr;
+ __le32 read_len32;
+};
+
+/* hwrm_dbg_read_direct_output (size:128b/16B) */
+struct hwrm_dbg_read_direct_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 crc32;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_dbg_qcaps_input (size:192b/24B) */
+struct hwrm_dbg_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_dbg_qcaps_output (size:192b/24B) */
+struct hwrm_dbg_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0[2];
+ __le32 coredump_component_disable_caps;
+ #define DBG_QCAPS_RESP_COREDUMP_COMPONENT_DISABLE_CAPS_NVRAM 0x1UL
+ __le32 flags;
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_NVM 0x1UL
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR 0x2UL
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR 0x4UL
+ #define DBG_QCAPS_RESP_FLAGS_USEQ 0x8UL
+ #define DBG_QCAPS_RESP_FLAGS_COREDUMP_HOST_DDR 0x10UL
+ #define DBG_QCAPS_RESP_FLAGS_COREDUMP_HOST_CAPTURE 0x20UL
+ #define DBG_QCAPS_RESP_FLAGS_PTRACE 0x40UL
+ #define DBG_QCAPS_RESP_FLAGS_REG_ACCESS_RESTRICTED 0x80UL
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_dbg_qcfg_input (size:192b/24B) */
+struct hwrm_dbg_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 flags;
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_MASK 0x3UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_SFT 0
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_NVM 0x0UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_HOST_DDR 0x1UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR 0x2UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_LAST DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR
+ __le32 coredump_component_disable_flags;
+ #define DBG_QCFG_REQ_COREDUMP_COMPONENT_DISABLE_FLAGS_NVRAM 0x1UL
+};
+
+/* hwrm_dbg_qcfg_output (size:256b/32B) */
+struct hwrm_dbg_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0[2];
+ __le32 coredump_size;
+ __le32 flags;
+ #define DBG_QCFG_RESP_FLAGS_UART_LOG 0x1UL
+ #define DBG_QCFG_RESP_FLAGS_UART_LOG_SECONDARY 0x2UL
+ #define DBG_QCFG_RESP_FLAGS_FW_TRACE 0x4UL
+ #define DBG_QCFG_RESP_FLAGS_FW_TRACE_SECONDARY 0x8UL
+ #define DBG_QCFG_RESP_FLAGS_DEBUG_NOTIFY 0x10UL
+ #define DBG_QCFG_RESP_FLAGS_JTAG_DEBUG 0x20UL
+ __le16 async_cmpl_ring;
+ u8 unused_2[2];
+ __le32 crashdump_size;
+ u8 unused_3[3];
+ u8 valid;
+};
+
+/* hwrm_dbg_crashdump_medium_cfg_input (size:320b/40B) */
+struct hwrm_dbg_crashdump_medium_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 output_dest_flags;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_TYPE_DDR 0x1UL
+ __le16 pg_size_lvl;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_MASK 0x3UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_SFT 0
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_0 0x0UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_1 0x1UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2 0x2UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_MASK 0x1cUL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_SFT 2
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K (0x0UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K (0x1UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K (0x2UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_2M (0x3UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8M (0x4UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G (0x5UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_MASK 0xffe0UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_SFT 5
+ __le32 size;
+ __le32 coredump_component_disable_flags;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_NVRAM 0x1UL
+ __le32 unused_0;
+ __le64 pbl;
+};
+
+/* hwrm_dbg_crashdump_medium_cfg_output (size:128b/16B) */
+struct hwrm_dbg_crashdump_medium_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* coredump_segment_record (size:128b/16B) */
+struct coredump_segment_record {
+ __le16 component_id;
+ __le16 segment_id;
+ __le16 max_instances;
+ u8 version_hi;
+ u8 version_low;
+ u8 seg_flags;
+ u8 compress_flags;
+ #define SFLAG_COMPRESSED_ZLIB 0x1UL
+ u8 unused_0[2];
+ __le32 segment_len;
+};
+
+/* hwrm_dbg_coredump_list_input (size:256b/32B) */
+struct hwrm_dbg_coredump_list_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 host_buf_len;
+ __le16 seq_no;
+ u8 flags;
+ #define DBG_COREDUMP_LIST_REQ_FLAGS_CRASHDUMP 0x1UL
+ u8 unused_0[1];
+};
+
+/* hwrm_dbg_coredump_list_output (size:128b/16B) */
+struct hwrm_dbg_coredump_list_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define DBG_COREDUMP_LIST_RESP_FLAGS_MORE 0x1UL
+ u8 unused_0;
+ __le16 total_segments;
+ __le16 data_len;
+ u8 unused_1;
+ u8 valid;
+};
+
+/* hwrm_dbg_coredump_initiate_input (size:256b/32B) */
+struct hwrm_dbg_coredump_initiate_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 component_id;
+ __le16 segment_id;
+ __le16 instance;
+ __le16 unused_0;
+ u8 seg_flags;
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_LIVE_DATA 0x1UL
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_CRASH_DATA 0x2UL
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_COLLECT_CTX_L1_CACHE 0x4UL
+ u8 unused_1[7];
+};
+
+/* hwrm_dbg_coredump_initiate_output (size:128b/16B) */
+struct hwrm_dbg_coredump_initiate_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* coredump_data_hdr (size:128b/16B) */
+struct coredump_data_hdr {
+ __le32 address;
+ __le32 flags_length;
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_MASK 0xffffffUL
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_SFT 0
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_INDIRECT_ACCESS 0x1000000UL
+ __le32 instance;
+ __le32 next_offset;
+};
+
+/* hwrm_dbg_coredump_retrieve_input (size:448b/56B) */
+struct hwrm_dbg_coredump_retrieve_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 host_buf_len;
+ __le32 unused_0;
+ __le16 component_id;
+ __le16 segment_id;
+ __le16 instance;
+ __le16 unused_1;
+ u8 seg_flags;
+ u8 unused_2;
+ __le16 unused_3;
+ __le32 unused_4;
+ __le32 seq_no;
+ __le32 unused_5;
+};
+
+/* hwrm_dbg_coredump_retrieve_output (size:128b/16B) */
+struct hwrm_dbg_coredump_retrieve_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define DBG_COREDUMP_RETRIEVE_RESP_FLAGS_MORE 0x1UL
+ u8 unused_0;
+ __le16 data_len;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_dbg_ring_info_get_input (size:192b/24B) */
+struct hwrm_dbg_ring_info_get_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_TX 0x1UL
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_RX 0x2UL
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_NQ 0x3UL
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_NQ
+ u8 unused_0[3];
+ __le32 fw_ring_id;
+};
+
+/* hwrm_dbg_ring_info_get_output (size:192b/24B) */
+struct hwrm_dbg_ring_info_get_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 producer_index;
+ __le32 consumer_index;
+ __le32 cag_vector_ctrl;
+ __le16 st_tag;
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_dbg_log_buffer_flush_input (size:192b/24B) */
+struct hwrm_dbg_log_buffer_flush_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT_TRACE 0x0UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT2_TRACE 0x1UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT_TRACE 0x2UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT2_TRACE 0x3UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP0_TRACE 0x4UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_L2_HWRM_TRACE 0x5UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE 0x6UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA0_TRACE 0x7UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE 0x8UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE 0x9UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE 0xaUL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_AFM_KONG_HWRM_TRACE 0xbUL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_LAST DBG_LOG_BUFFER_FLUSH_REQ_TYPE_AFM_KONG_HWRM_TRACE
+ u8 unused_1[2];
+ __le32 flags;
+ #define DBG_LOG_BUFFER_FLUSH_REQ_FLAGS_FLUSH_ALL_BUFFERS 0x1UL
+};
+
+/* hwrm_dbg_log_buffer_flush_output (size:128b/16B) */
+struct hwrm_dbg_log_buffer_flush_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 current_buffer_offset;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_nvm_read_input (size:320b/40B) */
+struct hwrm_nvm_read_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le16 dir_idx;
+ u8 unused_0[2];
+ __le32 offset;
+ __le32 len;
+ u8 unused_1[4];
+};
+
+/* hwrm_nvm_read_output (size:128b/16B) */
+struct hwrm_nvm_read_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dir_entries_input (size:192b/24B) */
+struct hwrm_nvm_get_dir_entries_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+};
+
+/* hwrm_nvm_get_dir_entries_output (size:128b/16B) */
+struct hwrm_nvm_get_dir_entries_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dir_info_input (size:128b/16B) */
+struct hwrm_nvm_get_dir_info_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_nvm_get_dir_info_output (size:192b/24B) */
+struct hwrm_nvm_get_dir_info_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 entries;
+ __le32 entry_length;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_write_input (size:448b/56B) */
+struct hwrm_nvm_write_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 dir_attr;
+ __le32 dir_data_length;
+ __le16 option;
+ __le16 flags;
+ #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL
+ #define NVM_WRITE_REQ_FLAGS_BATCH_MODE 0x2UL
+ #define NVM_WRITE_REQ_FLAGS_BATCH_LAST 0x4UL
+ #define NVM_WRITE_REQ_FLAGS_SKIP_CRID_CHECK 0x8UL
+ __le32 dir_item_length;
+ __le32 offset;
+ __le32 len;
+ __le32 unused_0;
+};
+
+/* hwrm_nvm_write_output (size:128b/16B) */
+struct hwrm_nvm_write_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 dir_item_length;
+ __le16 dir_idx;
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_nvm_write_cmd_err (size:64b/8B) */
+struct hwrm_nvm_write_cmd_err {
+ u8 code;
+ #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ #define NVM_WRITE_CMD_ERR_CODE_LAST NVM_WRITE_CMD_ERR_CODE_NO_SPACE
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_modify_input (size:320b/40B) */
+struct hwrm_nvm_modify_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le16 dir_idx;
+ __le16 flags;
+ #define NVM_MODIFY_REQ_FLAGS_BATCH_MODE 0x1UL
+ #define NVM_MODIFY_REQ_FLAGS_BATCH_LAST 0x2UL
+ __le32 offset;
+ __le32 len;
+ u8 unused_1[4];
+};
+
+/* hwrm_nvm_modify_output (size:128b/16B) */
+struct hwrm_nvm_modify_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_find_dir_entry_input (size:256b/32B) */
+struct hwrm_nvm_find_dir_entry_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID 0x1UL
+ __le16 dir_idx;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ u8 opt_ordinal;
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ 0x0UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE 0x1UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT 0x2UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_LAST NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT
+ u8 unused_0[3];
+};
+
+/* hwrm_nvm_find_dir_entry_output (size:256b/32B) */
+struct hwrm_nvm_find_dir_entry_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 dir_item_length;
+ __le32 dir_data_length;
+ __le32 fw_ver;
+ __le16 dir_ordinal;
+ __le16 dir_idx;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_erase_dir_entry_input (size:192b/24B) */
+struct hwrm_nvm_erase_dir_entry_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 dir_idx;
+ u8 unused_0[6];
+};
+
+/* hwrm_nvm_erase_dir_entry_output (size:128b/16B) */
+struct hwrm_nvm_erase_dir_entry_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dev_info_input (size:192b/24B) */
+struct hwrm_nvm_get_dev_info_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 flags;
+ #define NVM_GET_DEV_INFO_REQ_FLAGS_SECURITY_SOC_NVM 0x1UL
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_get_dev_info_output (size:768b/96B) */
+struct hwrm_nvm_get_dev_info_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 manufacturer_id;
+ __le16 device_id;
+ __le32 sector_size;
+ __le32 nvram_size;
+ __le32 reserved_size;
+ __le32 available_size;
+ u8 nvm_cfg_ver_maj;
+ u8 nvm_cfg_ver_min;
+ u8 nvm_cfg_ver_upd;
+ u8 flags;
+ #define NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID 0x1UL
+ char pkg_name[16];
+ __le16 hwrm_fw_major;
+ __le16 hwrm_fw_minor;
+ __le16 hwrm_fw_build;
+ __le16 hwrm_fw_patch;
+ __le16 mgmt_fw_major;
+ __le16 mgmt_fw_minor;
+ __le16 mgmt_fw_build;
+ __le16 mgmt_fw_patch;
+ __le16 roce_fw_major;
+ __le16 roce_fw_minor;
+ __le16 roce_fw_build;
+ __le16 roce_fw_patch;
+ __le16 netctrl_fw_major;
+ __le16 netctrl_fw_minor;
+ __le16 netctrl_fw_build;
+ __le16 netctrl_fw_patch;
+ __le16 srt2_fw_major;
+ __le16 srt2_fw_minor;
+ __le16 srt2_fw_build;
+ __le16 srt2_fw_patch;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_mod_dir_entry_input (size:256b/32B) */
+struct hwrm_nvm_mod_dir_entry_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM 0x1UL
+ __le16 dir_idx;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 dir_attr;
+ __le32 checksum;
+};
+
+/* hwrm_nvm_mod_dir_entry_output (size:128b/16B) */
+struct hwrm_nvm_mod_dir_entry_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_verify_update_input (size:192b/24B) */
+struct hwrm_nvm_verify_update_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ u8 unused_0[2];
+};
+
+/* hwrm_nvm_verify_update_output (size:128b/16B) */
+struct hwrm_nvm_verify_update_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_install_update_input (size:192b/24B) */
+struct hwrm_nvm_install_update_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 install_type;
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_LAST NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL
+ __le16 flags;
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_VERIFY_ONLY 0x8UL
+ u8 unused_0[2];
+};
+
+/* hwrm_nvm_install_update_output (size:192b/24B) */
+struct hwrm_nvm_install_update_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 installed_items;
+ u8 result;
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_FAILURE 0xffUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_MALLOC_FAILURE 0xfdUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER 0xfbUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER 0xf3UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE 0xf2UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER 0xecUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE 0xebUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM 0xeaUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH 0xe9UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST 0xe8UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER 0xe7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM 0xe6UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM 0xe5UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH 0xe4UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE 0xe1UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV 0xceUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID 0xcdUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR 0xccUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID 0xcbUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM 0xc5UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM 0xc4UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM 0xc3UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR 0xb9UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR 0xb8UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR 0xb7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND 0xb0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED 0xa7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_LAST NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED
+ u8 problem_item;
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_LAST NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE
+ u8 reset_required;
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI 0x1UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER 0x2UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_LAST NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */
+struct hwrm_nvm_install_update_cmd_err {
+ u8 code;
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK 0x3UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT 0x4UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_get_variable_input (size:320b/40B) */
+struct hwrm_nvm_get_variable_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_LAST NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
+ __le16 dimensions;
+ __le16 index_0;
+ __le16 index_1;
+ __le16 index_2;
+ __le16 index_3;
+ u8 flags;
+ #define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT 0x1UL
+ u8 unused_0;
+};
+
+/* hwrm_nvm_get_variable_output (size:128b/16B) */
+struct hwrm_nvm_get_variable_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_LAST NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */
+struct hwrm_nvm_get_variable_cmd_err {
+ u8 code;
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_LAST NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_set_variable_input (size:320b/40B) */
+struct hwrm_nvm_set_variable_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_LAST NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
+ __le16 dimensions;
+ __le16 index_0;
+ __le16 index_1;
+ __le16 index_2;
+ __le16 index_3;
+ u8 flags;
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_AES256 (0x2UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH (0x3UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_MASK 0x70UL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_SFT 4
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FACTORY_DEFAULT 0x80UL
+ u8 unused_0;
+};
+
+/* hwrm_nvm_set_variable_output (size:128b/16B) */
+struct hwrm_nvm_set_variable_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */
+struct hwrm_nvm_set_variable_cmd_err {
+ u8 code;
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_LAST NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR
+ u8 unused_0[7];
+};
+
+/* hwrm_selftest_qlist_input (size:128b/16B) */
+struct hwrm_selftest_qlist_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_selftest_qlist_output (size:2240b/280B) */
+struct hwrm_selftest_qlist_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_tests;
+ u8 available_tests;
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST 0x20UL
+ u8 offline_tests;
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST 0x20UL
+ u8 unused_0;
+ __le16 test_timeout;
+ u8 unused_1[2];
+ char test_name[8][32];
+ u8 eyescope_target_BER_support;
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E8_SUPPORTED 0x0UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E9_SUPPORTED 0x1UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E10_SUPPORTED 0x2UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E11_SUPPORTED 0x3UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED 0x4UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_LAST SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED
+ u8 unused_2[6];
+ u8 valid;
+};
+
+/* hwrm_selftest_exec_input (size:192b/24B) */
+struct hwrm_selftest_exec_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 flags;
+ #define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL
+ u8 unused_0[7];
+};
+
+/* hwrm_selftest_exec_output (size:128b/16B) */
+struct hwrm_selftest_exec_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 requested_tests;
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST 0x20UL
+ u8 test_success;
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST 0x20UL
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_selftest_irq_input (size:128b/16B) */
+struct hwrm_selftest_irq_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_selftest_irq_output (size:128b/16B) */
+struct hwrm_selftest_irq_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* dbc_dbc (size:64b/8B) */
+struct dbc_dbc {
+ __le32 index;
+ #define DBC_DBC_INDEX_MASK 0xffffffUL
+ #define DBC_DBC_INDEX_SFT 0
+ #define DBC_DBC_EPOCH 0x1000000UL
+ #define DBC_DBC_TOGGLE_MASK 0x6000000UL
+ #define DBC_DBC_TOGGLE_SFT 25
+ __le32 type_path_xid;
+ #define DBC_DBC_XID_MASK 0xfffffUL
+ #define DBC_DBC_XID_SFT 0
+ #define DBC_DBC_PATH_MASK 0x3000000UL
+ #define DBC_DBC_PATH_SFT 24
+ #define DBC_DBC_PATH_ROCE (0x0UL << 24)
+ #define DBC_DBC_PATH_L2 (0x1UL << 24)
+ #define DBC_DBC_PATH_ENGINE (0x2UL << 24)
+ #define DBC_DBC_PATH_LAST DBC_DBC_PATH_ENGINE
+ #define DBC_DBC_VALID 0x4000000UL
+ #define DBC_DBC_DEBUG_TRACE 0x8000000UL
+ #define DBC_DBC_TYPE_MASK 0xf0000000UL
+ #define DBC_DBC_TYPE_SFT 28
+ #define DBC_DBC_TYPE_SQ (0x0UL << 28)
+ #define DBC_DBC_TYPE_RQ (0x1UL << 28)
+ #define DBC_DBC_TYPE_SRQ (0x2UL << 28)
+ #define DBC_DBC_TYPE_SRQ_ARM (0x3UL << 28)
+ #define DBC_DBC_TYPE_CQ (0x4UL << 28)
+ #define DBC_DBC_TYPE_CQ_ARMSE (0x5UL << 28)
+ #define DBC_DBC_TYPE_CQ_ARMALL (0x6UL << 28)
+ #define DBC_DBC_TYPE_CQ_ARMENA (0x7UL << 28)
+ #define DBC_DBC_TYPE_SRQ_ARMENA (0x8UL << 28)
+ #define DBC_DBC_TYPE_CQ_CUTOFF_ACK (0x9UL << 28)
+ #define DBC_DBC_TYPE_NQ (0xaUL << 28)
+ #define DBC_DBC_TYPE_NQ_ARM (0xbUL << 28)
+ #define DBC_DBC_TYPE_NQ_MASK (0xeUL << 28)
+ #define DBC_DBC_TYPE_NULL (0xfUL << 28)
+ #define DBC_DBC_TYPE_LAST DBC_DBC_TYPE_NULL
+};
+
+/* db_push_start (size:64b/8B) */
+struct db_push_start {
+ u64 db;
+ #define DB_PUSH_START_DB_INDEX_MASK 0xffffffUL
+ #define DB_PUSH_START_DB_INDEX_SFT 0
+ #define DB_PUSH_START_DB_PI_LO_MASK 0xff000000UL
+ #define DB_PUSH_START_DB_PI_LO_SFT 24
+ #define DB_PUSH_START_DB_XID_MASK 0xfffff00000000ULL
+ #define DB_PUSH_START_DB_XID_SFT 32
+ #define DB_PUSH_START_DB_PI_HI_MASK 0xf0000000000000ULL
+ #define DB_PUSH_START_DB_PI_HI_SFT 52
+ #define DB_PUSH_START_DB_TYPE_MASK 0xf000000000000000ULL
+ #define DB_PUSH_START_DB_TYPE_SFT 60
+ #define DB_PUSH_START_DB_TYPE_PUSH_START (0xcULL << 60)
+ #define DB_PUSH_START_DB_TYPE_PUSH_END (0xdULL << 60)
+ #define DB_PUSH_START_DB_TYPE_LAST DB_PUSH_START_DB_TYPE_PUSH_END
+};
+
+/* db_push_end (size:64b/8B) */
+struct db_push_end {
+ u64 db;
+ #define DB_PUSH_END_DB_INDEX_MASK 0xffffffUL
+ #define DB_PUSH_END_DB_INDEX_SFT 0
+ #define DB_PUSH_END_DB_PI_LO_MASK 0xff000000UL
+ #define DB_PUSH_END_DB_PI_LO_SFT 24
+ #define DB_PUSH_END_DB_XID_MASK 0xfffff00000000ULL
+ #define DB_PUSH_END_DB_XID_SFT 32
+ #define DB_PUSH_END_DB_PI_HI_MASK 0xf0000000000000ULL
+ #define DB_PUSH_END_DB_PI_HI_SFT 52
+ #define DB_PUSH_END_DB_PATH_MASK 0x300000000000000ULL
+ #define DB_PUSH_END_DB_PATH_SFT 56
+ #define DB_PUSH_END_DB_PATH_ROCE (0x0ULL << 56)
+ #define DB_PUSH_END_DB_PATH_L2 (0x1ULL << 56)
+ #define DB_PUSH_END_DB_PATH_ENGINE (0x2ULL << 56)
+ #define DB_PUSH_END_DB_PATH_LAST DB_PUSH_END_DB_PATH_ENGINE
+ #define DB_PUSH_END_DB_DEBUG_TRACE 0x800000000000000ULL
+ #define DB_PUSH_END_DB_TYPE_MASK 0xf000000000000000ULL
+ #define DB_PUSH_END_DB_TYPE_SFT 60
+ #define DB_PUSH_END_DB_TYPE_PUSH_START (0xcULL << 60)
+ #define DB_PUSH_END_DB_TYPE_PUSH_END (0xdULL << 60)
+ #define DB_PUSH_END_DB_TYPE_LAST DB_PUSH_END_DB_TYPE_PUSH_END
+};
+
+/* db_push_info (size:64b/8B) */
+struct db_push_info {
+ u32 push_size_push_index;
+ #define DB_PUSH_INFO_PUSH_INDEX_MASK 0xffffffUL
+ #define DB_PUSH_INFO_PUSH_INDEX_SFT 0
+ #define DB_PUSH_INFO_PUSH_SIZE_MASK 0x1f000000UL
+ #define DB_PUSH_INFO_PUSH_SIZE_SFT 24
+ u32 reserved32;
+};
+
+/* fw_status_reg (size:32b/4B) */
+struct fw_status_reg {
+ u32 fw_status;
+ #define FW_STATUS_REG_CODE_MASK 0xffffUL
+ #define FW_STATUS_REG_CODE_SFT 0
+ #define FW_STATUS_REG_CODE_READY 0x8000UL
+ #define FW_STATUS_REG_CODE_LAST FW_STATUS_REG_CODE_READY
+ #define FW_STATUS_REG_IMAGE_DEGRADED 0x10000UL
+ #define FW_STATUS_REG_RECOVERABLE 0x20000UL
+ #define FW_STATUS_REG_CRASHDUMP_ONGOING 0x40000UL
+ #define FW_STATUS_REG_CRASHDUMP_COMPLETE 0x80000UL
+ #define FW_STATUS_REG_SHUTDOWN 0x100000UL
+ #define FW_STATUS_REG_CRASHED_NO_MASTER 0x200000UL
+ #define FW_STATUS_REG_RECOVERING 0x400000UL
+ #define FW_STATUS_REG_MANU_DEBUG_STATUS 0x800000UL
+};
+
+/* hcomm_status (size:64b/8B) */
+struct hcomm_status {
+ u32 sig_ver;
+ #define HCOMM_STATUS_VER_MASK 0xffUL
+ #define HCOMM_STATUS_VER_SFT 0
+ #define HCOMM_STATUS_VER_LATEST 0x1UL
+ #define HCOMM_STATUS_VER_LAST HCOMM_STATUS_VER_LATEST
+ #define HCOMM_STATUS_SIGNATURE_MASK 0xffffff00UL
+ #define HCOMM_STATUS_SIGNATURE_SFT 8
+ #define HCOMM_STATUS_SIGNATURE_VAL (0x484353UL << 8)
+ #define HCOMM_STATUS_SIGNATURE_LAST HCOMM_STATUS_SIGNATURE_VAL
+ u32 fw_status_loc;
+ #define HCOMM_STATUS_TRUE_ADDR_SPACE_MASK 0x3UL
+ #define HCOMM_STATUS_TRUE_ADDR_SPACE_SFT 0
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_GRC 0x1UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR0 0x2UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1 0x3UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_LAST HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1
+ #define HCOMM_STATUS_TRUE_OFFSET_MASK 0xfffffffcUL
+ #define HCOMM_STATUS_TRUE_OFFSET_SFT 2
+};
+#define HCOMM_STATUS_STRUCT_LOC 0x31001F0UL
+
+#endif /* _BNXT_HSI_H_ */
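
A minimal sketch (not part of the patch) of how a caller might fill the NVM read request defined above. It assumes the bnxt convention that the common header fields (req_type, seq_id, resp_addr, ...) and the actual submission are handled by the driver's HWRM plumbing; the helper names are hypothetical.

/* Hypothetical helper: all multi-byte fields are little-endian on the wire. */
static void bnxt_fill_nvm_read(struct hwrm_nvm_read_input *req, u16 dir_idx,
			       u32 offset, u32 len, dma_addr_t dest)
{
	req->host_dest_addr = cpu_to_le64(dest);	/* DMA buffer for the data */
	req->dir_idx = cpu_to_le16(dir_idx);		/* NVM directory entry index */
	req->offset = cpu_to_le32(offset);		/* byte offset within the item */
	req->len = cpu_to_le32(len);			/* bytes to read */
}

/* A response may only be trusted once firmware has set the 'valid' byte. */
static bool bnxt_nvm_resp_ok(const struct hwrm_nvm_read_output *resp)
{
	return resp->valid && !le16_to_cpu(resp->error_code);
}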
diff --git a/include/linux/bpf-cgroup-defs.h b/include/linux/bpf-cgroup-defs.h
index 0985221d5478..c9e6b26abab6 100644
--- a/include/linux/bpf-cgroup-defs.h
+++ b/include/linux/bpf-cgroup-defs.h
@@ -63,6 +63,7 @@ struct cgroup_bpf {
*/
struct hlist_head progs[MAX_CGROUP_BPF_ATTACH_TYPE];
u8 flags[MAX_CGROUP_BPF_ATTACH_TYPE];
+ u64 revisions[MAX_CGROUP_BPF_ATTACH_TYPE];
/* list of cgroup shared storages */
struct list_head storages;
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 70c8b94e797a..082ccd8ad96b 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -103,7 +103,6 @@ struct bpf_cgroup_storage {
struct bpf_cgroup_link {
struct bpf_link link;
struct cgroup *cgroup;
- enum bpf_attach_type type;
};
struct bpf_prog_list {
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 5b25d278409b..f9cd2164ed23 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1538,6 +1538,37 @@ struct btf_mod_pair {
struct bpf_kfunc_desc_tab;
+enum bpf_stream_id {
+ BPF_STDOUT = 1,
+ BPF_STDERR = 2,
+};
+
+struct bpf_stream_elem {
+ struct llist_node node;
+ int total_len;
+ int consumed_len;
+ char str[];
+};
+
+enum {
+ /* 100k bytes */
+ BPF_STREAM_MAX_CAPACITY = 100000ULL,
+};
+
+struct bpf_stream {
+ atomic_t capacity;
+ struct llist_head log; /* list of in-flight stream elements in LIFO order */
+
+ struct mutex lock; /* lock protecting backlog_{head,tail} */
+ struct llist_node *backlog_head; /* list of in-flight stream elements in FIFO order */
+ struct llist_node *backlog_tail; /* tail of the list above */
+};
+
+struct bpf_stream_stage {
+ struct llist_head log;
+ int len;
+};
+
struct bpf_prog_aux {
atomic64_t refcnt;
u32 used_map_cnt;
@@ -1646,6 +1677,7 @@ struct bpf_prog_aux {
struct work_struct work;
struct rcu_head rcu;
};
+ struct bpf_stream stream[2];
};
struct bpf_prog {
@@ -1697,11 +1729,10 @@ struct bpf_link {
enum bpf_link_type type;
const struct bpf_link_ops *ops;
struct bpf_prog *prog;
- /* whether BPF link itself has "sleepable" semantics, which can differ
- * from underlying BPF program having a "sleepable" semantics, as BPF
- * link's semantics is determined by target attach hook
- */
- bool sleepable;
+
+ u32 flags;
+ enum bpf_attach_type attach_type;
+
/* rcu is used before freeing, work can be used to schedule that
* RCU-based freeing before that, so they never overlap
*/
@@ -1709,6 +1740,11 @@ struct bpf_link {
struct rcu_head rcu;
struct work_struct work;
};
+ /* whether BPF link itself has "sleepable" semantics, which can differ
+ * from underlying BPF program having a "sleepable" semantics, as BPF
+ * link's semantics is determined by target attach hook
+ */
+ bool sleepable;
};
struct bpf_link_ops {
@@ -1748,7 +1784,6 @@ struct bpf_shim_tramp_link {
struct bpf_tracing_link {
struct bpf_tramp_link link;
- enum bpf_attach_type attach_type;
struct bpf_trampoline *trampoline;
struct bpf_prog *tgt_prog;
};
@@ -2001,11 +2036,13 @@ int bpf_prog_ctx_arg_info_init(struct bpf_prog *prog,
#if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM)
int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
- int cgroup_atype);
+ int cgroup_atype,
+ enum bpf_attach_type attach_type);
void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog);
#else
static inline int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
- int cgroup_atype)
+ int cgroup_atype,
+ enum bpf_attach_type attach_type)
{
return -EOPNOTSUPP;
}
@@ -2288,6 +2325,9 @@ bpf_prog_run_array_uprobe(const struct bpf_prog_array *array,
return ret;
}
+bool bpf_jit_bypass_spec_v1(void);
+bool bpf_jit_bypass_spec_v4(void);
+
#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
extern struct mutex bpf_stats_enabled_mutex;
@@ -2314,6 +2354,7 @@ extern const struct super_operations bpf_super_ops;
extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;
extern const struct file_operations bpf_iter_fops;
+extern const struct file_operations bpf_token_fops;
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
extern const struct bpf_prog_ops _name ## _prog_ops; \
@@ -2405,6 +2446,7 @@ int generic_map_delete_batch(struct bpf_map *map,
struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
+
int bpf_map_alloc_pages(const struct bpf_map *map, int nid,
unsigned long nr_pages, struct page **page_array);
#ifdef CONFIG_MEMCG
@@ -2475,22 +2517,27 @@ static inline bool bpf_allow_uninit_stack(const struct bpf_token *token)
static inline bool bpf_bypass_spec_v1(const struct bpf_token *token)
{
- return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
+ return bpf_jit_bypass_spec_v1() ||
+ cpu_mitigations_off() ||
+ bpf_token_capable(token, CAP_PERFMON);
}
static inline bool bpf_bypass_spec_v4(const struct bpf_token *token)
{
- return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
+ return bpf_jit_bypass_spec_v4() ||
+ cpu_mitigations_off() ||
+ bpf_token_capable(token, CAP_PERFMON);
}
int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);
void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
- const struct bpf_link_ops *ops, struct bpf_prog *prog);
+ const struct bpf_link_ops *ops, struct bpf_prog *prog,
+ enum bpf_attach_type attach_type);
void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type,
const struct bpf_link_ops *ops, struct bpf_prog *prog,
- bool sleepable);
+ enum bpf_attach_type attach_type, bool sleepable);
int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
int bpf_link_settle(struct bpf_link_primer *primer);
void bpf_link_cleanup(struct bpf_link_primer *primer);
@@ -2505,6 +2552,9 @@ void bpf_token_inc(struct bpf_token *token);
void bpf_token_put(struct bpf_token *token);
int bpf_token_create(union bpf_attr *attr);
struct bpf_token *bpf_token_get_from_fd(u32 ufd);
+int bpf_token_get_info_by_fd(struct bpf_token *token,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
bool bpf_token_allow_cmd(const struct bpf_token *token, enum bpf_cmd cmd);
bool bpf_token_allow_map_type(const struct bpf_token *token, enum bpf_map_type type);
@@ -2842,13 +2892,13 @@ bpf_prog_inc_not_zero(struct bpf_prog *prog)
static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
const struct bpf_link_ops *ops,
- struct bpf_prog *prog)
+ struct bpf_prog *prog, enum bpf_attach_type attach_type)
{
}
static inline void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type,
const struct bpf_link_ops *ops, struct bpf_prog *prog,
- bool sleepable)
+ enum bpf_attach_type attach_type, bool sleepable)
{
}
@@ -2903,6 +2953,13 @@ static inline struct bpf_token *bpf_token_get_from_fd(u32 ufd)
return ERR_PTR(-EOPNOTSUPP);
}
+static inline int bpf_token_get_info_by_fd(struct bpf_token *token,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ return -EOPNOTSUPP;
+}
+
static inline void __dev_flush(struct list_head *flush_list)
{
}
@@ -3543,6 +3600,16 @@ bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
#define MAX_BPRINTF_VARARGS 12
#define MAX_BPRINTF_BUF 1024
+/* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary
+ * arguments representation.
+ */
+#define MAX_BPRINTF_BIN_ARGS 512
+
+struct bpf_bprintf_buffers {
+ char bin_args[MAX_BPRINTF_BIN_ARGS];
+ char buf[MAX_BPRINTF_BUF];
+};
+
struct bpf_bprintf_data {
u32 *bin_args;
char *buf;
@@ -3550,9 +3617,33 @@ struct bpf_bprintf_data {
bool get_buf;
};
-int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
+int bpf_bprintf_prepare(const char *fmt, u32 fmt_size, const u64 *raw_args,
u32 num_args, struct bpf_bprintf_data *data);
void bpf_bprintf_cleanup(struct bpf_bprintf_data *data);
+int bpf_try_get_buffers(struct bpf_bprintf_buffers **bufs);
+void bpf_put_buffers(void);
+
+void bpf_prog_stream_init(struct bpf_prog *prog);
+void bpf_prog_stream_free(struct bpf_prog *prog);
+int bpf_prog_stream_read(struct bpf_prog *prog, enum bpf_stream_id stream_id, void __user *buf, int len);
+void bpf_stream_stage_init(struct bpf_stream_stage *ss);
+void bpf_stream_stage_free(struct bpf_stream_stage *ss);
+__printf(2, 3)
+int bpf_stream_stage_printk(struct bpf_stream_stage *ss, const char *fmt, ...);
+int bpf_stream_stage_commit(struct bpf_stream_stage *ss, struct bpf_prog *prog,
+ enum bpf_stream_id stream_id);
+int bpf_stream_stage_dump_stack(struct bpf_stream_stage *ss);
+
+#define bpf_stream_printk(ss, ...) bpf_stream_stage_printk(&ss, __VA_ARGS__)
+#define bpf_stream_dump_stack(ss) bpf_stream_stage_dump_stack(&ss)
+
+#define bpf_stream_stage(ss, prog, stream_id, expr) \
+ ({ \
+ bpf_stream_stage_init(&ss); \
+ (expr); \
+ bpf_stream_stage_commit(&ss, prog, stream_id); \
+ bpf_stream_stage_free(&ss); \
+ })
#ifdef CONFIG_BPF_LSM
void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype);
@@ -3588,4 +3679,8 @@ static inline bool bpf_is_subprog(const struct bpf_prog *prog)
return prog->aux->func_idx != 0;
}
+int bpf_prog_get_file_line(struct bpf_prog *prog, unsigned long ip, const char **filep,
+ const char **linep, int *nump);
+struct bpf_prog *bpf_prog_find_from_stack(void);
+
#endif /* _LINUX_BPF_H */
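
A minimal usage sketch (not part of the patch) for the bpf_stream_stage() helpers added above, based only on the macro definitions in this hunk; the reporting function and message are hypothetical.

/* Hypothetical call site: stage messages, then commit them to BPF_STDERR. */
static void report_prog_error(struct bpf_prog *prog, int err)
{
	struct bpf_stream_stage ss;

	bpf_stream_stage(ss, prog, BPF_STDERR, ({
		bpf_stream_printk(ss, "ERROR: operation failed with %d\n", err);
		bpf_stream_dump_stack(ss);	/* optional kernel stack dump */
	}));
	/* bpf_stream_stage() commits and frees the staged elements itself. */
}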
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 256274acb1d8..94defa405c85 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -344,7 +344,7 @@ struct bpf_func_state {
#define MAX_CALL_FRAMES 8
-/* instruction history flags, used in bpf_insn_hist_entry.flags field */
+/* instruction history flags, used in bpf_jmp_history_entry.flags field */
enum {
/* instruction references stack slot through PTR_TO_STACK register;
* we also store stack's frame number in lower 3 bits (MAX_CALL_FRAMES is 8)
@@ -366,7 +366,7 @@ enum {
static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES);
static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8);
-struct bpf_insn_hist_entry {
+struct bpf_jmp_history_entry {
u32 idx;
/* insn idx can't be bigger than 1 million */
u32 prev_idx : 20;
@@ -449,32 +449,20 @@ struct bpf_verifier_state {
/* first and last insn idx of this verifier state */
u32 first_insn_idx;
u32 last_insn_idx;
- /* If this state is a part of states loop this field points to some
- * parent of this state such that:
- * - it is also a member of the same states loop;
- * - DFS states traversal starting from initial state visits loop_entry
- * state before this state.
- * Used to compute topmost loop entry for state loops.
- * State loops might appear because of open coded iterators logic.
- * See get_loop_entry() for more information.
+ /* if this state is a backedge state then equal_state
+ * records the cached state to which this state is equal.
*/
- struct bpf_verifier_state *loop_entry;
- /* Sub-range of env->insn_hist[] corresponding to this state's
- * instruction history.
- * Backtracking is using it to go from last to first.
- * For most states instruction history is short, 0-3 instructions.
+ struct bpf_verifier_state *equal_state;
+ /* jmp history recorded from first to last.
+ * backtracking is using it to go from last to first.
+ * For most states jmp_history_cnt is [0-3].
* For loops can go up to ~40.
*/
- u32 insn_hist_start;
- u32 insn_hist_end;
+ struct bpf_jmp_history_entry *jmp_history;
+ u32 jmp_history_cnt;
u32 dfs_depth;
u32 callback_unroll_depth;
u32 may_goto_depth;
- /* If this state was ever pointed-to by other state's loop_entry field
- * this flag would be set to true. Used to avoid freeing such states
- * while they are still in use.
- */
- u32 used_as_loop_entry;
};
#define bpf_get_spilled_reg(slot, frame, mask) \
@@ -580,7 +568,8 @@ struct bpf_insn_aux_data {
u64 map_key_state; /* constant (32 bit) key tracking for maps */
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
- bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
+ bool nospec; /* do not execute this instruction speculatively */
+ bool nospec_result; /* result is unsafe under speculation, nospec must follow */
bool zext_dst; /* this insn zero extends dst reg */
bool needs_zext; /* alu op needs to clear upper bits */
bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */
@@ -609,6 +598,11 @@ struct bpf_insn_aux_data {
* accepts callback function as a parameter.
*/
bool calls_callback;
+ /*
+ * CFG strongly connected component this instruction belongs to,
+ * zero if it is a singleton SCC.
+ */
+ u32 scc;
/* registers alive before this instruction. */
u16 live_regs_before;
};
@@ -718,6 +712,38 @@ struct bpf_idset {
u32 ids[BPF_ID_MAP_SIZE];
};
+/* see verifier.c:compute_scc_callchain() */
+struct bpf_scc_callchain {
+ /* call sites from bpf_verifier_state->frame[*]->callsite leading to this SCC */
+ u32 callsites[MAX_CALL_FRAMES - 1];
+ /* last frame in a chain is identified by SCC id */
+ u32 scc;
+};
+
+/* verifier state waiting for propagate_backedges() */
+struct bpf_scc_backedge {
+ struct bpf_scc_backedge *next;
+ struct bpf_verifier_state state;
+};
+
+struct bpf_scc_visit {
+ struct bpf_scc_callchain callchain;
+ /* first state in current verification path that entered SCC
+ * identified by the callchain
+ */
+ struct bpf_verifier_state *entry_state;
+ struct bpf_scc_backedge *backedges; /* list of backedges */
+ u32 num_backedges;
+};
+
+/* An array of bpf_scc_visit structs sharing the same bpf_scc_callchain->scc
+ * but having different bpf_scc_callchain->callsites.
+ */
+struct bpf_scc_info {
+ u32 num_visits;
+ struct bpf_scc_visit visits[];
+};
+
/* single container for all structs
* one verifier_env per bpf_check() call
*/
@@ -775,9 +801,7 @@ struct bpf_verifier_env {
int cur_postorder;
} cfg;
struct backtrack_state bt;
- struct bpf_insn_hist_entry *insn_hist;
- struct bpf_insn_hist_entry *cur_hist_ent;
- u32 insn_hist_cap;
+ struct bpf_jmp_history_entry *cur_hist_ent;
u32 pass_cnt; /* number of times do_check() was called */
u32 subprog_cnt;
/* number of instructions analyzed by the verifier */
@@ -799,6 +823,7 @@ struct bpf_verifier_env {
u32 longest_mark_read_walk;
u32 free_list_size;
u32 explored_states_size;
+ u32 num_backedges;
bpfptr_t fd_array;
/* bit mask to keep track of whether a register has been accessed
@@ -816,6 +841,10 @@ struct bpf_verifier_env {
char tmp_str_buf[TMP_STR_BUF_LEN];
struct bpf_insn insn_buf[INSN_BUF_SIZE];
struct bpf_insn epilogue_buf[INSN_BUF_SIZE];
+ struct bpf_scc_callchain callchain_buf;
+ /* array of pointers to bpf_scc_info indexed by SCC id */
+ struct bpf_scc_info **scc_info;
+ u32 scc_cnt;
};
static inline struct bpf_func_info_aux *subprog_aux(struct bpf_verifier_env *env, int subprog)
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 028b3e00378e..15c35655f482 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -183,6 +183,12 @@
#define BCM_LED_MULTICOLOR_PROGRAM 0xa
/*
+ * Broadcom Synchronous Ethernet Controls (expansion register 0x0E)
+ */
+#define BCM_EXP_SYNC_ETHERNET (MII_BCM54XX_EXP_SEL_ER + 0x0E)
+#define BCM_EXP_SYNC_ETHERNET_MII_LITE BIT(11)
+
+/*
* BCM5482: Shadow registers
* Shadow values go into bits [14:10] of register 0x1c to select a shadow
* register to access.
diff --git a/include/linux/btf.h b/include/linux/btf.h
index b2983706292f..9eda6b113f9b 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -221,6 +221,9 @@ bool btf_is_vmlinux(const struct btf *btf);
struct module *btf_try_get_module(const struct btf *btf);
u32 btf_nr_types(const struct btf *btf);
struct btf *btf_base_btf(const struct btf *btf);
+bool btf_type_is_i32(const struct btf_type *t);
+bool btf_type_is_i64(const struct btf_type *t);
+bool btf_type_is_primitive(const struct btf_type *t);
bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
const struct btf_member *m,
u32 expected_offset, u32 expected_size);
diff --git a/include/linux/can/bittiming.h b/include/linux/can/bittiming.h
index 9b8a9c39614b..5dfdbb63b1d5 100644
--- a/include/linux/can/bittiming.h
+++ b/include/linux/can/bittiming.h
@@ -14,7 +14,7 @@
#define CAN_BITRATE_UNSET 0
#define CAN_BITRATE_UNKNOWN (-1U)
-#define CAN_CTRLMODE_TDC_MASK \
+#define CAN_CTRLMODE_FD_TDC_MASK \
(CAN_CTRLMODE_TDC_AUTO | CAN_CTRLMODE_TDC_MANUAL)
/*
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 492d23bec7be..9a92cbe5b2cb 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -91,9 +91,9 @@ struct can_priv {
struct can_berr_counter *bec);
};
-static inline bool can_tdc_is_enabled(const struct can_priv *priv)
+static inline bool can_fd_tdc_is_enabled(const struct can_priv *priv)
{
- return !!(priv->ctrlmode & CAN_CTRLMODE_TDC_MASK);
+ return !!(priv->ctrlmode & CAN_CTRLMODE_FD_TDC_MASK);
}
/*
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index e61687d5e496..6b93a64115fe 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -375,15 +375,12 @@ struct css_rstat_cpu {
* Child cgroups with stat updates on this cpu since the last read
* are linked on the parent's ->updated_children through
* ->updated_next. updated_children is terminated by its container css.
- *
- * In addition to being more compact, singly-linked list pointing to
- * the css makes it unnecessary for each per-cpu struct to point back
- * to the associated css.
- *
- * Protected by per-cpu css->ss->rstat_ss_cpu_lock.
*/
struct cgroup_subsys_state *updated_children;
struct cgroup_subsys_state *updated_next; /* NULL if not on the list */
+
+ struct llist_node lnode; /* lockless list for update */
+ struct cgroup_subsys_state *owner; /* back pointer */
};
/*
@@ -821,7 +818,7 @@ struct cgroup_subsys {
unsigned int depends_on;
spinlock_t rstat_ss_lock;
- raw_spinlock_t __percpu *rstat_ss_cpu_lock;
+ struct llist_head __percpu *lhead; /* lockless update list head */
};
extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
@@ -898,14 +895,12 @@ static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
#endif
}
+#ifdef CONFIG_CGROUP_NET_CLASSID
static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{
-#ifdef CONFIG_CGROUP_NET_CLASSID
return READ_ONCE(skcd->classid);
-#else
- return 0;
-#endif
}
+#endif
static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
u16 prioidx)
@@ -915,13 +910,13 @@ static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
#endif
}
+#ifdef CONFIG_CGROUP_NET_CLASSID
static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
u32 classid)
{
-#ifdef CONFIG_CGROUP_NET_CLASSID
WRITE_ONCE(skcd->classid, classid);
-#endif
}
+#endif
#else /* CONFIG_SOCK_CGROUP_DATA */
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 2e6e603b7493..630705a47129 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -1360,6 +1360,32 @@ void clk_hw_unregister(struct clk_hw *hw);
/* helper functions */
const char *__clk_get_name(const struct clk *clk);
const char *clk_hw_get_name(const struct clk_hw *hw);
+
+/**
+ * clk_hw_get_dev() - get device from a hardware clock.
+ * @hw: the clk_hw pointer to get the struct device from
+ *
+ * This is a helper to get the struct device associated with a hardware
+ * clock. Some clock controllers, such as the one registered with
+ * CLK_OF_DECLARE(), may have not provided a device pointer while
+ * registering the clock.
+ *
+ * Return: the struct device associated with the clock, or NULL if there
+ * is none.
+ */
+struct device *clk_hw_get_dev(const struct clk_hw *hw);
+
+/**
+ * clk_hw_get_of_node() - get device_node from a hardware clock.
+ * @hw: the clk_hw pointer to get the struct device_node from
+ *
+ * This is a helper to get the struct device_node associated with a
+ * hardware clock.
+ *
+ * Return: the struct device_node associated with the clock, or NULL
+ * if there is none.
+ */
+struct device_node *clk_hw_get_of_node(const struct clk_hw *hw);
#ifdef CONFIG_COMMON_CLK
struct clk_hw *__clk_get_hw(struct clk *clk);
#else
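
A minimal sketch (not part of the patch) of the new clk_hw_get_dev()/clk_hw_get_of_node() helpers in a clock driver callback; the pass-through recalc_rate implementation and names are hypothetical and only illustrate that either handle may be absent.

/* Hypothetical pass-through clock: log through whichever handle exists. */
static unsigned long foo_recalc_rate(struct clk_hw *hw,
				     unsigned long parent_rate)
{
	struct device *dev = clk_hw_get_dev(hw);
	struct device_node *np = clk_hw_get_of_node(hw);

	if (dev)
		dev_dbg(dev, "%s: rate %lu\n", clk_hw_get_name(hw), parent_rate);
	else if (np)
		pr_debug("%pOF: %s: rate %lu\n", np, clk_hw_get_name(hw),
			 parent_rate);

	return parent_rate;
}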
diff --git a/include/linux/codetag.h b/include/linux/codetag.h
index 5f2b9a1f722c..457ed8fd3214 100644
--- a/include/linux/codetag.h
+++ b/include/linux/codetag.h
@@ -54,6 +54,7 @@ struct codetag_iterator {
struct codetag_module *cmod;
unsigned long mod_id;
struct codetag *ct;
+ unsigned long mod_seq;
};
#ifdef MODULE
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 4fc8e26914ad..fa4ffe037bc7 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -89,6 +89,9 @@
#define __no_sanitize_coverage
#endif
+/* Only Clang needs to disable the coverage sanitizer for kstack_erase. */
+#define __no_kstack_erase __no_sanitize_coverage
+
#if __has_feature(shadow_call_stack)
# define __noscs __attribute__((__no_sanitize__("shadow-call-stack")))
#endif
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 2b77d12e07b2..16755431fc11 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -424,6 +424,10 @@ struct ftrace_likely_data {
# define randomized_struct_fields_end
#endif
+#ifndef __no_kstack_erase
+# define __no_kstack_erase
+#endif
+
#ifndef __noscs
# define __noscs
#endif
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index 96e8a66da133..68861da4cf7c 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -10,7 +10,7 @@
#ifdef CONFIG_COREDUMP
struct core_vma_metadata {
unsigned long start, end;
- unsigned long flags;
+ vm_flags_t flags;
unsigned long dump_size;
unsigned long pgoff;
struct file *file;
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 6378370a952f..b91b993f58ee 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -187,20 +187,31 @@ static inline void arch_cpu_finalize_init(void) { }
void play_idle_precise(u64 duration_ns, u64 latency_ns);
-static inline void play_idle(unsigned long duration_us)
-{
- play_idle_precise(duration_us * NSEC_PER_USEC, U64_MAX);
-}
-
#ifdef CONFIG_HOTPLUG_CPU
void cpuhp_report_idle_dead(void);
#else
static inline void cpuhp_report_idle_dead(void) { }
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+enum cpu_attack_vectors {
+ CPU_MITIGATE_USER_KERNEL,
+ CPU_MITIGATE_USER_USER,
+ CPU_MITIGATE_GUEST_HOST,
+ CPU_MITIGATE_GUEST_GUEST,
+ NR_CPU_ATTACK_VECTORS,
+};
+
+enum smt_mitigations {
+ SMT_MITIGATIONS_OFF,
+ SMT_MITIGATIONS_AUTO,
+ SMT_MITIGATIONS_ON,
+};
+
#ifdef CONFIG_CPU_MITIGATIONS
extern bool cpu_mitigations_off(void);
extern bool cpu_mitigations_auto_nosmt(void);
+extern bool cpu_attack_vector_mitigated(enum cpu_attack_vectors v);
+extern enum smt_mitigations smt_mitigations;
#else
static inline bool cpu_mitigations_off(void)
{
@@ -210,6 +221,11 @@ static inline bool cpu_mitigations_auto_nosmt(void)
{
return false;
}
+static inline bool cpu_attack_vector_mitigated(enum cpu_attack_vectors v)
+{
+ return false;
+}
+#define smt_mitigations SMT_MITIGATIONS_OFF
#endif
#endif /* _LINUX_CPU_H_ */
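
A minimal sketch (not part of the patch) of how mitigation selection code might consult the new attack-vector and SMT controls declared above; the mitigation being gated is hypothetical.

/* Hypothetical guest->host mitigation gate. */
static void foo_select_mitigation(void)
{
	if (cpu_mitigations_off() ||
	    !cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST))
		return;				/* vector not selected */

	if (smt_mitigations == SMT_MITIGATIONS_ON)
		pr_info("foo: enabling SMT-hardened variant\n");
}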
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 7ae80a7ca81e..ff8f41ab7ce6 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -355,6 +355,18 @@ unsigned int cpumask_next_wrap(int n, const struct cpumask *src)
}
/**
+ * cpumask_random - get random cpu in *src.
+ * @src: cpumask pointer
+ *
+ * Return: random set bit, or >= nr_cpu_ids if @src is empty.
+ */
+static __always_inline
+unsigned int cpumask_random(const struct cpumask *src)
+{
+ return find_random_bit(cpumask_bits(src), nr_cpu_ids);
+}
+
+/**
* for_each_cpu - iterate over every cpu in a mask
* @cpu: the (optionally unsigned) integer iterator
* @mask: the cpumask pointer
@@ -547,22 +559,6 @@ unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
}
/**
- * cpumask_nth_andnot - get the Nth cpu set in 1st cpumask, and clear in 2nd.
- * @srcp1: the cpumask pointer
- * @srcp2: the cpumask pointer
- * @cpu: the Nth cpu to find, starting from 0
- *
- * Return: >= nr_cpu_ids if such cpu doesn't exist.
- */
-static __always_inline
-unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1,
- const struct cpumask *srcp2)
-{
- return find_nth_andnot_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
- small_cpumask_bits, cpumask_check(cpu));
-}
-
-/**
* cpumask_nth_and_andnot - get the Nth cpu set in 1st and 2nd cpumask, and clear in 3rd.
* @srcp1: the cpumask pointer
* @srcp2: the cpumask pointer
@@ -609,6 +605,18 @@ void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
__set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
+/**
+ * cpumask_clear_cpus - clear cpus in a cpumask
+ * @dstp: the cpumask pointer
+ * @cpu: cpu number (< nr_cpu_ids)
+ * @ncpus: number of cpus to clear (< nr_cpu_ids)
+ */
+static __always_inline void cpumask_clear_cpus(struct cpumask *dstp,
+ unsigned int cpu, unsigned int ncpus)
+{
+ cpumask_check(cpu + ncpus - 1);
+ bitmap_clear(cpumask_bits(dstp), cpumask_check(cpu), ncpus);
+}
/**
* cpumask_clear_cpu - clear a cpu in a cpumask
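
A minimal sketch (not part of the patch) of the two new cpumask helpers above; the surrounding functions and CPU range are hypothetical.

/* Spread work by picking a random online CPU, falling back to the first. */
static unsigned int foo_pick_cpu(void)
{
	unsigned int cpu = cpumask_random(cpu_online_mask);

	return cpu < nr_cpu_ids ? cpu : cpumask_first(cpu_online_mask);
}

/* Drop a contiguous range of CPUs (here 8..15) from a private mask. */
static void foo_trim_mask(struct cpumask *mask)
{
	cpumask_clear_cpus(mask, 8, 8);
}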
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index b50f1954d1bb..a2137e19be7d 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -136,6 +136,9 @@
/* Set if the algorithm supports virtual addresses. */
#define CRYPTO_ALG_REQ_VIRT 0x00040000
+/* Set if the algorithm cannot have a fallback (e.g., phmac). */
+#define CRYPTO_ALG_NO_FALLBACK 0x00080000
+
/* The high bits 0xff000000 are reserved for type-specific flags. */
/*
diff --git a/include/linux/damon.h b/include/linux/damon.h
index a4011726cb3b..f13664c62ddd 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -448,12 +448,29 @@ struct damos_access_pattern {
};
/**
+ * struct damos_migrate_dests - Migration destination nodes and their weights.
+ * @node_id_arr: Array of migration destination node ids.
+ * @weight_arr: Array of migration weights for @node_id_arr.
+ * @nr_dests: Length of the @node_id_arr and @weight_arr arrays.
+ *
+ * @node_id_arr is an array of the ids of migration destination nodes.
+ * @weight_arr is an array of the weights for those. The weights in
+ * @weight_arr are for the nodes in @node_id_arr at the same array index.
+ */
+struct damos_migrate_dests {
+ unsigned int *node_id_arr;
+ unsigned int *weight_arr;
+ size_t nr_dests;
+};
+
+/**
* struct damos - Represents a Data Access Monitoring-based Operation Scheme.
* @pattern: Access pattern of target regions.
- * @action: &damo_action to be applied to the target regions.
+ * @action: &damos_action to be applied to the target regions.
* @apply_interval_us: The time between applying the @action.
* @quota: Control the aggressiveness of this scheme.
* @wmarks: Watermarks for automated (in)activation of this scheme.
+ * @migrate_dests: Destination nodes if @action is "migrate_{hot,cold}".
* @target_nid: Destination node if @action is "migrate_{hot,cold}".
* @filters: Additional set of &struct damos_filter for &action.
* @ops_filters: ops layer handling &struct damos_filter objects list.
@@ -472,9 +489,12 @@ struct damos_access_pattern {
* monitoring context are inactive, DAMON stops monitoring either, and just
* repeatedly checks the watermarks.
*
+ * @migrate_dests specifies multiple migration target nodes with different
+ * weights for migrate_hot or migrate_cold actions. @target_nid is ignored if
+ * this is set.
+ *
* @target_nid is used to set the migration target node for migrate_hot or
- * migrate_cold actions, which means it's only meaningful when @action is either
- * "migrate_hot" or "migrate_cold".
+ * migrate_cold actions when @migrate_dests is unset.
*
* Before applying the &action to a memory region, &struct damon_operations
* implementation could check pages of the region and skip &action to respect
@@ -517,7 +537,10 @@ struct damos {
struct damos_quota quota;
struct damos_watermarks wmarks;
union {
- int target_nid;
+ struct {
+ int target_nid;
+ struct damos_migrate_dests migrate_dests;
+ };
};
struct list_head filters;
struct list_head ops_filters;
@@ -553,6 +576,7 @@ enum damon_ops_id {
* @get_scheme_score: Get the score of a region for a scheme.
* @apply_scheme: Apply a DAMON-based operation scheme.
* @target_valid: Determine if the target is valid.
+ * @cleanup_target: Clean up each target before deallocation.
* @cleanup: Clean up the context.
*
* DAMON can be extended for various address spaces and usages. For this,
@@ -585,6 +609,7 @@ enum damon_ops_id {
* filters (&struct damos_filter) that handled by itself.
* @target_valid should check whether the target is still valid for the
* monitoring.
+ * @cleanup_target is called just before the target is deallocated.
* @cleanup is called from @kdamond just before its termination.
*/
struct damon_operations {
@@ -600,42 +625,16 @@ struct damon_operations {
struct damon_target *t, struct damon_region *r,
struct damos *scheme, unsigned long *sz_filter_passed);
bool (*target_valid)(struct damon_target *t);
+ void (*cleanup_target)(struct damon_target *t);
void (*cleanup)(struct damon_ctx *context);
};
-/**
- * struct damon_callback - Monitoring events notification callbacks.
- *
- * @after_wmarks_check: Called after each schemes' watermarks check.
- * @after_aggregation: Called after each aggregation.
- * @before_terminate: Called before terminating the monitoring.
- *
- * The monitoring thread (&damon_ctx.kdamond) calls @before_terminate just
- * before finishing the monitoring.
- *
- * The monitoring thread calls @after_wmarks_check after each DAMON-based
- * operation schemes' watermarks check. If users need to make changes to the
- * attributes of the monitoring context while it's deactivated due to the
- * watermarks, this is the good place to do.
- *
- * The monitoring thread calls @after_aggregation for each of the aggregation
- * intervals. Therefore, users can safely access the monitoring results
- * without additional protection. For the reason, users are recommended to use
- * these callback for the accesses to the results.
- *
- * If any callback returns non-zero, monitoring stops.
- */
-struct damon_callback {
- int (*after_wmarks_check)(struct damon_ctx *context);
- int (*after_aggregation)(struct damon_ctx *context);
- void (*before_terminate)(struct damon_ctx *context);
-};
-
/*
* struct damon_call_control - Control damon_call().
*
* @fn: Function to be called back.
* @data: Data that will be passed to @fn.
+ * @repeat: Repeat invocations.
* @return_code: Return code from @fn invocation.
*
* Control damon_call(), which requests specific kdamond to invoke a given
@@ -644,19 +643,22 @@ struct damon_callback {
struct damon_call_control {
int (*fn)(void *data);
void *data;
+ bool repeat;
int return_code;
/* private: internal use only */
/* informs if the kdamond finished handling of the request */
struct completion completion;
/* informs if the kdamond canceled @fn invocation */
bool canceled;
+ /* List head for siblings. */
+ struct list_head list;
};
/**
* struct damon_intervals_goal - Monitoring intervals auto-tuning goal.
*
* @access_bp: Access events observation ratio to achieve in bp.
- * @aggrs: Number of aggregations to acheive @access_bp within.
+ * @aggrs: Number of aggregations to achieve @access_bp within.
* @min_sample_us: Minimum resulting sampling interval in microseconds.
* @max_sample_us: Maximum resulting sampling interval in microseconds.
*
@@ -697,7 +699,7 @@ struct damon_intervals_goal {
* ``mmap()`` calls from the application, in case of virtual memory monitoring)
* and applies the changes for each @ops_update_interval. All time intervals
* are in micro-seconds. Please refer to &struct damon_operations and &struct
- * damon_callback for more detail.
+ * damon_call_control for more detail.
*/
struct damon_attrs {
unsigned long sample_interval;
@@ -744,7 +746,6 @@ struct damon_attrs {
* Accesses to other fields must be protected by themselves.
*
* @ops: Set of monitoring operations for given use cases.
- * @callback: Set of callbacks for monitoring events notifications.
*
* @adaptive_targets: Head of monitoring targets (&damon_target) list.
* @schemes: Head of schemes (&damos) list.
@@ -775,8 +776,9 @@ struct damon_ctx {
/* for scheme quotas prioritization */
unsigned long *regions_score_histogram;
- struct damon_call_control *call_control;
- struct mutex call_control_lock;
+ /* lists of &struct damon_call_control */
+ struct list_head call_controls;
+ struct mutex call_controls_lock;
struct damos_walk_control *walk_control;
struct mutex walk_control_lock;
@@ -786,7 +788,6 @@ struct damon_ctx {
struct mutex kdamond_lock;
struct damon_operations ops;
- struct damon_callback callback;
struct list_head adaptive_targets;
struct list_head schemes;
@@ -905,7 +906,7 @@ struct damon_target *damon_new_target(void);
void damon_add_target(struct damon_ctx *ctx, struct damon_target *t);
bool damon_targets_empty(struct damon_ctx *ctx);
void damon_free_target(struct damon_target *t);
-void damon_destroy_target(struct damon_target *t);
+void damon_destroy_target(struct damon_target *t, struct damon_ctx *ctx);
unsigned int damon_nr_regions(struct damon_target *t);
struct damon_ctx *damon_new_ctx(void);
@@ -934,6 +935,7 @@ static inline unsigned int damon_max_nr_accesses(const struct damon_attrs *attrs
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
+bool damon_is_running(struct damon_ctx *ctx);
int damon_call(struct damon_ctx *ctx, struct damon_call_control *control);
int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control);
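
A minimal sketch (not part of the patch) of filling the new struct damos_migrate_dests described above; the node ids, weights and setup function are hypothetical, and lifetime management of the arrays is left out.

/* Hypothetical: send hot data to nodes 1 and 2 with a 70/30 weighting. */
static unsigned int foo_dest_nodes[]   = { 1, 2 };
static unsigned int foo_dest_weights[] = { 7, 3 };

static void foo_set_migrate_dests(struct damos *scheme)
{
	scheme->migrate_dests.node_id_arr = foo_dest_nodes;
	scheme->migrate_dests.weight_arr = foo_dest_weights;
	scheme->migrate_dests.nr_dests = ARRAY_SIZE(foo_dest_nodes);
	/* With migrate_dests set, target_nid is ignored per the kernel-doc. */
}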
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 78891518291d..9d624f4d9df6 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -26,7 +26,7 @@ struct dax_operations {
* number of pages available for DAX at that pfn.
*/
long (*direct_access)(struct dax_device *, pgoff_t, long,
- enum dax_access_mode, void **, pfn_t *);
+ enum dax_access_mode, void **, unsigned long *);
/* zero_page_range: required operation. Zero page range */
int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
/*
@@ -243,7 +243,7 @@ static inline void dax_break_layout_final(struct inode *inode)
bool dax_alive(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
- enum dax_access_mode mode, void **kaddr, pfn_t *pfn);
+ enum dax_access_mode mode, void **kaddr, unsigned long *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
@@ -257,9 +257,10 @@ void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops);
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,
- pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
+ unsigned long *pfnp, int *errp,
+ const struct iomap_ops *ops);
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
- unsigned int order, pfn_t pfn);
+ unsigned int order, unsigned long pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
void dax_delete_mapping_range(struct address_space *mapping,
loff_t start, loff_t end);
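
A minimal sketch (not part of the patch) of a dax_direct_access() call with its new plain pfn out-parameter (unsigned long instead of pfn_t); the wrapper is hypothetical.

/* Hypothetical: resolve the kernel address and pfn backing one DAX page. */
static long foo_dax_peek(struct dax_device *dax_dev, pgoff_t pgoff,
			 void **kaddr)
{
	unsigned long pfn;	/* was pfn_t before this change */
	long avail;

	avail = dax_direct_access(dax_dev, pgoff, 1, DAX_ACCESS, kaddr, &pfn);
	if (avail < 0)
		return avail;	/* negative errno from the dax driver */

	pr_debug("pgoff %lu -> pfn %#lx\n", pgoff, pfn);
	return 0;
}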
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index cb95951547ab..84fdc3a6a19a 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -156,7 +156,7 @@ typedef int (*dm_busy_fn) (struct dm_target *ti);
*/
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
long nr_pages, enum dax_access_mode node, void **kaddr,
- pfn_t *pfn);
+ unsigned long *pfn);
typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
size_t nr_pages);
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index b12776883d14..64639e104110 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -26,6 +26,7 @@
struct dma_fence;
struct dma_fence_ops;
struct dma_fence_cb;
+struct seq_file;
/**
* struct dma_fence - software synchronization primitive
@@ -97,6 +98,7 @@ struct dma_fence {
};
enum dma_fence_flag_bits {
+ DMA_FENCE_FLAG_SEQNO64_BIT,
DMA_FENCE_FLAG_SIGNALED_BIT,
DMA_FENCE_FLAG_TIMESTAMP_BIT,
DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
@@ -125,14 +127,6 @@ struct dma_fence_cb {
*/
struct dma_fence_ops {
/**
- * @use_64bit_seqno:
- *
- * True if this dma_fence implementation uses 64bit seqno, false
- * otherwise.
- */
- bool use_64bit_seqno;
-
- /**
* @get_driver_name:
*
* Returns the driver name. This is a callback to allow drivers to
@@ -262,6 +256,9 @@ struct dma_fence_ops {
void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
spinlock_t *lock, u64 context, u64 seqno);
+void dma_fence_init64(struct dma_fence *fence, const struct dma_fence_ops *ops,
+ spinlock_t *lock, u64 context, u64 seqno);
+
void dma_fence_release(struct kref *kref);
void dma_fence_free(struct dma_fence *fence);
void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq);
@@ -382,6 +379,29 @@ bool dma_fence_remove_callback(struct dma_fence *fence,
void dma_fence_enable_sw_signaling(struct dma_fence *fence);
/**
+ * DOC: Safe external access to driver provided object members
+ *
+ * All data not stored directly in the dma-fence object, such as the
+ * &dma_fence.lock and memory potentially accessed by functions in the
+ * &dma_fence.ops table, MUST NOT be accessed after the fence has been signalled
+ * because after that point drivers are allowed to free it.
+ *
+ * All code accessing that data via the dma-fence API (or directly, which is
+ * discouraged), MUST make sure to contain the complete access within a
+ * &rcu_read_lock and &rcu_read_unlock pair.
+ *
+ * Some dma-fence APIs handle this automatically, while others, for example
+ * &dma_fence_driver_name and &dma_fence_timeline_name, leave that
+ * responsibility to the caller.
+ *
+ * To enable this scheme to work, drivers MUST ensure an RCU grace period
+ * elapses between signalling the fence and freeing said data.
+ *
+ */
+const char __rcu *dma_fence_driver_name(struct dma_fence *fence);
+const char __rcu *dma_fence_timeline_name(struct dma_fence *fence);
+
+/**
* dma_fence_is_signaled_locked - Return an indication if the fence
* is signaled yet.
* @fence: the fence to check
@@ -441,21 +461,20 @@ dma_fence_is_signaled(struct dma_fence *fence)
/**
* __dma_fence_is_later - return if f1 is chronologically later than f2
+ * @fence: fence in whose context to do the comparison
* @f1: the first fence's seqno
* @f2: the second fence's seqno from the same context
- * @ops: dma_fence_ops associated with the seqno
*
* Returns true if f1 is chronologically later than f2. Both fences must be
* from the same context, since a seqno is not common across contexts.
*/
-static inline bool __dma_fence_is_later(u64 f1, u64 f2,
- const struct dma_fence_ops *ops)
+static inline bool __dma_fence_is_later(struct dma_fence *fence, u64 f1, u64 f2)
{
/* This is for backward compatibility with drivers which can only handle
* 32bit sequence numbers. Use a 64bit compare when the driver says to
* do so.
*/
- if (ops->use_64bit_seqno)
+ if (test_bit(DMA_FENCE_FLAG_SEQNO64_BIT, &fence->flags))
return f1 > f2;
return (int)(lower_32_bits(f1) - lower_32_bits(f2)) > 0;
@@ -475,7 +494,7 @@ static inline bool dma_fence_is_later(struct dma_fence *f1,
if (WARN_ON(f1->context != f2->context))
return false;
- return __dma_fence_is_later(f1->seqno, f2->seqno, f1->ops);
+ return __dma_fence_is_later(f1, f1->seqno, f2->seqno);
}
/**
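A minimal sketch (not part of this diff) tying the changes together: a driver that needs 64-bit seqno comparison now calls the new dma_fence_init64() instead of setting the removed ->use_64bit_seqno, and readers of the driver-provided name strings wrap the access in RCU as the DOC block requires. "struct my_fence", my_fence_ops, ctx and seq are hypothetical; the __rcu annotation is cast away only for brevity.

	static void example_init_and_log(struct my_fence *f, u64 ctx, u64 seq)
	{
		/* sets DMA_FENCE_FLAG_SEQNO64_BIT on behalf of the driver */
		dma_fence_init64(&f->base, &my_fence_ops, &f->lock, ctx, seq);

		rcu_read_lock();
		pr_info("fence: %s/%s\n",
			(const char *)dma_fence_driver_name(&f->base),
			(const char *)dma_fence_timeline_name(&f->base));
		rcu_read_unlock();
	}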
diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h
index 06c4de602b2f..7d40b51933d1 100644
--- a/include/linux/dmapool.h
+++ b/include/linux/dmapool.h
@@ -60,6 +60,14 @@ static inline struct dma_pool *dma_pool_create(const char *name,
NUMA_NO_NODE);
}
+/**
+ * dma_pool_zalloc - Get a zero-initialized block of DMA coherent memory.
+ * @pool: dma pool that will produce the block
+ * @mem_flags: GFP_* bitmask
+ * @handle: pointer to dma address of block
+ *
+ * Same as dma_pool_alloc(), but the returned memory is zeroed.
+ */
static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle)
{
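For reference, a short usage sketch of the now-documented helper (the pool is assumed to exist and to be freed by the caller):

	static void *alloc_desc(struct dma_pool *pool, dma_addr_t *dma)
	{
		void *desc = dma_pool_zalloc(pool, GFP_KERNEL, dma);

		/* desc, if non-NULL, is zero-filled; *dma holds the bus address.
		 * The pair is later released with dma_pool_free(pool, desc, *dma). */
		return desc;
	}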
diff --git a/include/linux/dpll.h b/include/linux/dpll.h
index 5e4f9ab1cf75..fa1e76920d0e 100644
--- a/include/linux/dpll.h
+++ b/include/linux/dpll.h
@@ -30,6 +30,14 @@ struct dpll_device_ops {
void *dpll_priv,
unsigned long *qls,
struct netlink_ext_ack *extack);
+ int (*phase_offset_monitor_set)(const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_feature_state state,
+ struct netlink_ext_ack *extack);
+ int (*phase_offset_monitor_get)(const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_feature_state *state,
+ struct netlink_ext_ack *extack);
};
struct dpll_pin_ops {
@@ -95,6 +103,16 @@ struct dpll_pin_ops {
const struct dpll_device *dpll, void *dpll_priv,
struct dpll_pin_esync *esync,
struct netlink_ext_ack *extack);
+ int (*ref_sync_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *ref_sync_pin,
+ void *ref_sync_pin_priv,
+ const enum dpll_pin_state state,
+ struct netlink_ext_ack *extack);
+ int (*ref_sync_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *ref_sync_pin,
+ void *ref_sync_pin_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack);
};
struct dpll_pin_frequency {
@@ -194,6 +212,9 @@ int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
void dpll_pin_on_pin_unregister(struct dpll_pin *parent, struct dpll_pin *pin,
const struct dpll_pin_ops *ops, void *priv);
+int dpll_pin_ref_sync_pair_add(struct dpll_pin *pin,
+ struct dpll_pin *ref_sync_pin);
+
int dpll_device_change_ntf(struct dpll_device *dpll);
int dpll_pin_change_ntf(struct dpll_pin *pin);
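A hedged sketch of how a driver might wire the new phase-offset-monitor callbacks; "struct my_dpll" and its phase_mon_state member are hypothetical, and hardware programming is elided.

	static int my_phase_mon_set(const struct dpll_device *dpll, void *dpll_priv,
				    enum dpll_feature_state state,
				    struct netlink_ext_ack *extack)
	{
		struct my_dpll *md = dpll_priv;	/* hypothetical private data */

		md->phase_mon_state = state;	/* program the hardware here */
		return 0;
	}

	static int my_phase_mon_get(const struct dpll_device *dpll, void *dpll_priv,
				    enum dpll_feature_state *state,
				    struct netlink_ext_ack *extack)
	{
		struct my_dpll *md = dpll_priv;

		*state = md->phase_mon_state;
		return 0;
	}

	static const struct dpll_device_ops my_dpll_ops = {
		.phase_offset_monitor_set = my_phase_mon_set,
		.phase_offset_monitor_get = my_phase_mon_get,
		/* remaining ops omitted in this sketch */
	};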
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 5e0dd333ad1f..de5bd76a400c 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -536,7 +536,7 @@ struct ethtool_rmon_hist_range {
u16 high;
};
-#define ETHTOOL_RMON_HIST_MAX 10
+#define ETHTOOL_RMON_HIST_MAX 11
/**
* struct ethtool_rmon_stats - selected RMON (RFC 2819) statistics
@@ -826,6 +826,19 @@ struct ethtool_rxfh_param {
};
/**
+ * struct ethtool_rxfh_fields - Rx Flow Hashing (RXFH) header field config
+ * @data: which header fields are used for hashing, bitmask of RXH_* defines
+ * @flow_type: L2-L4 network traffic flow type
+ * @rss_context: RSS context, will only be used if rxfh_per_ctx_fields is
+ * set in struct ethtool_ops
+ */
+struct ethtool_rxfh_fields {
+ u32 data;
+ u32 flow_type;
+ u32 rss_context;
+};
+
+/**
* struct kernel_ethtool_ts_info - kernel copy of struct ethtool_ts_info
* @cmd: command number = %ETHTOOL_GET_TS_INFO
* @so_timestamping: bit mask of the sum of the supported SO_TIMESTAMPING flags
@@ -852,9 +865,8 @@ struct kernel_ethtool_ts_info {
* @supported_input_xfrm: supported types of input xfrm from %RXH_XFRM_*.
* @cap_link_lanes_supported: indicates if the driver supports lanes
* parameter.
- * @cap_rss_ctx_supported: indicates if the driver supports RSS
- * contexts via legacy API, drivers implementing @create_rxfh_context
- * do not have to set this bit.
+ * @rxfh_per_ctx_fields: device supports selecting different header fields
+ * for Rx hash calculation and RSS for each additional context.
* @rxfh_per_ctx_key: device supports setting different RSS key for each
* additional context. Netlink API should report hfunc, key, and input_xfrm
* for every context, not just context 0.
@@ -968,6 +980,8 @@ struct kernel_ethtool_ts_info {
* will remain unchanged.
* Returns a negative error code or zero. An error code must be returned
* if at least one unsupported change was requested.
+ * @get_rxfh_fields: Get header fields used for flow hashing.
+ * @set_rxfh_fields: Set header fields used for flow hashing.
* @create_rxfh_context: Create a new RSS context with the specified RX flow
* hash indirection table, hash key, and hash function.
* The &struct ethtool_rxfh_context for this context is passed in @ctx;
@@ -1083,7 +1097,7 @@ struct kernel_ethtool_ts_info {
struct ethtool_ops {
u32 supported_input_xfrm:8;
u32 cap_link_lanes_supported:1;
- u32 cap_rss_ctx_supported:1;
+ u32 rxfh_per_ctx_fields:1;
u32 rxfh_per_ctx_key:1;
u32 cap_rss_rxnfc_adds:1;
u32 rxfh_indir_space;
@@ -1153,6 +1167,11 @@ struct ethtool_ops {
int (*get_rxfh)(struct net_device *, struct ethtool_rxfh_param *);
int (*set_rxfh)(struct net_device *, struct ethtool_rxfh_param *,
struct netlink_ext_ack *extack);
+ int (*get_rxfh_fields)(struct net_device *,
+ struct ethtool_rxfh_fields *);
+ int (*set_rxfh_fields)(struct net_device *,
+ const struct ethtool_rxfh_fields *,
+ struct netlink_ext_ack *extack);
int (*create_rxfh_context)(struct net_device *,
struct ethtool_rxfh_context *ctx,
const struct ethtool_rxfh_param *rxfh,
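A hedged sketch of the new dedicated callbacks (my_* names are hypothetical); this configuration was previously multiplexed through the ETHTOOL_GRXFH/SRXFH commands of get/set_rxnfc. A driver that supports per-context fields additionally sets .rxfh_per_ctx_fields = 1 in its ethtool_ops.

	static int my_get_rxfh_fields(struct net_device *dev,
				      struct ethtool_rxfh_fields *fields)
	{
		switch (fields->flow_type) {
		case TCP_V4_FLOW:
		case TCP_V6_FLOW:
			fields->data = RXH_IP_SRC | RXH_IP_DST |
				       RXH_L4_B_0_1 | RXH_L4_B_2_3;
			return 0;
		default:
			fields->data = 0;
			return 0;
		}
	}

	static int my_set_rxfh_fields(struct net_device *dev,
				      const struct ethtool_rxfh_fields *fields,
				      struct netlink_ext_ack *extack)
	{
		/* hypothetical hardware can only hash on the 4-tuple */
		if (fields->data & ~(RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3))
			return -EOPNOTSUPP;
		return 0;
	}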
diff --git a/include/linux/ethtool_netlink.h b/include/linux/ethtool_netlink.h
index aba91335273a..39254b2726c0 100644
--- a/include/linux/ethtool_netlink.h
+++ b/include/linux/ethtool_netlink.h
@@ -43,6 +43,8 @@ void ethtool_aggregate_rmon_stats(struct net_device *dev,
struct ethtool_rmon_stats *rmon_stats);
bool ethtool_dev_mm_supported(struct net_device *dev);
+void ethnl_pse_send_ntf(struct net_device *netdev, unsigned long notif);
+
#else
static inline int ethnl_cable_test_alloc(struct phy_device *phydev, u8 cmd)
{
@@ -120,6 +122,11 @@ static inline bool ethtool_dev_mm_supported(struct net_device *dev)
return false;
}
+static inline void ethnl_pse_send_ntf(struct net_device *netdev,
+ unsigned long notif)
+{
+}
+
#endif /* IS_ENABLED(CONFIG_ETHTOOL_NETLINK) */
static inline int ethnl_cable_test_result(struct phy_device *phydev, u8 pair,
diff --git a/include/linux/filter.h b/include/linux/filter.h
index f5cf4d35d83e..1e7fd3ee759e 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -82,7 +82,7 @@ struct ctl_table_header;
#define BPF_CALL_ARGS 0xe0
/* unused opcode to mark speculation barrier for mitigating
- * Speculative Store Bypass
+ * Spectre v1 and v4
*/
#define BPF_NOSPEC 0xc0
@@ -1073,10 +1073,20 @@ bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
return set_memory_rox((unsigned long)hdr, hdr->size >> PAGE_SHIFT);
}
-int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
+int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap,
+ enum skb_drop_reason *reason);
+
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{
- return sk_filter_trim_cap(sk, skb, 1);
+ enum skb_drop_reason ignore_reason;
+
+ return sk_filter_trim_cap(sk, skb, 1, &ignore_reason);
+}
+
+static inline int sk_filter_reason(struct sock *sk, struct sk_buff *skb,
+ enum skb_drop_reason *reason)
+{
+ return sk_filter_trim_cap(sk, skb, 1, reason);
}
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
@@ -1278,6 +1288,8 @@ int bpf_jit_get_func_addr(const struct bpf_prog *prog,
const struct bpf_insn *insn, bool extra_pass,
u64 *func_addr, bool *func_addr_fixed);
+const char *bpf_jit_get_prog_name(struct bpf_prog *prog);
+
struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);
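A minimal sketch of a receive path consuming the new drop-reason out-parameter; callers that do not care keep using plain sk_filter().

	static int rx_one(struct sock *sk, struct sk_buff *skb)
	{
		enum skb_drop_reason reason;

		if (sk_filter_reason(sk, skb, &reason)) {
			kfree_skb_reason(skb, reason);
			return NET_RX_DROP;
		}
		return NET_RX_SUCCESS;
	}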
diff --git a/include/linux/find.h b/include/linux/find.h
index 5a2c267ea7f9..9d720ad92bc1 100644
--- a/include/linux/find.h
+++ b/include/linux/find.h
@@ -44,6 +44,8 @@ unsigned long _find_next_bit_le(const unsigned long *addr, unsigned
long size, unsigned long offset);
#endif
+unsigned long find_random_bit(const unsigned long *addr, unsigned long size);
+
#ifndef find_next_bit
/**
* find_next_bit - find the next set bit in a memory region
@@ -268,33 +270,6 @@ unsigned long find_nth_and_bit(const unsigned long *addr1, const unsigned long *
}
/**
- * find_nth_andnot_bit - find N'th set bit in 2 memory regions,
- * flipping bits in 2nd region
- * @addr1: The 1st address to start the search at
- * @addr2: The 2nd address to start the search at
- * @size: The maximum number of bits to search
- * @n: The number of set bit, which position is needed, counting from 0
- *
- * Returns the bit number of the N'th set bit.
- * If no such, returns @size.
- */
-static __always_inline
-unsigned long find_nth_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
- unsigned long size, unsigned long n)
-{
- if (n >= size)
- return size;
-
- if (small_const_nbits(size)) {
- unsigned long val = *addr1 & (~*addr2) & GENMASK(size - 1, 0);
-
- return val ? fns(val, n) : size;
- }
-
- return __find_nth_andnot_bit(addr1, addr2, size, n);
-}
-
-/**
* find_nth_and_andnot_bit - find N'th set bit in 2 memory regions,
* excluding those set in 3rd region
* @addr1: The 1st address to start the search at
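A hedged sketch of the newly declared helper; its semantics are not documented in this hunk, so returning @size when no bit is set is an assumption based on the usual find_*_bit convention.

	static int pick_random_set_bit(const unsigned long *mask, unsigned long nbits)
	{
		unsigned long bit = find_random_bit(mask, nbits);

		if (bit >= nbits)	/* assumed: @size means no set bit */
			return -ENOENT;
		return bit;
	}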
diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
index e4ce1cae03bf..b3b53f8c1b28 100644
--- a/include/linux/fortify-string.h
+++ b/include/linux/fortify-string.h
@@ -596,7 +596,7 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size,
if (p_size != SIZE_MAX && p_size < size)
fortify_panic(func, FORTIFY_WRITE, p_size, size, true);
else if (q_size != SIZE_MAX && q_size < size)
- fortify_panic(func, FORTIFY_READ, p_size, size, true);
+ fortify_panic(func, FORTIFY_READ, q_size, size, true);
/*
* Warn when writing beyond destination field size.
diff --git a/include/linux/fpga/adi-axi-common.h b/include/linux/fpga/adi-axi-common.h
deleted file mode 100644
index 141ac3f251e6..000000000000
--- a/include/linux/fpga/adi-axi-common.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Analog Devices AXI common registers & definitions
- *
- * Copyright 2019 Analog Devices Inc.
- *
- * https://wiki.analog.com/resources/fpga/docs/axi_ip
- * https://wiki.analog.com/resources/fpga/docs/hdl/regmap
- */
-
-#ifndef ADI_AXI_COMMON_H_
-#define ADI_AXI_COMMON_H_
-
-#define ADI_AXI_REG_VERSION 0x0000
-
-#define ADI_AXI_PCORE_VER(major, minor, patch) \
- (((major) << 16) | ((minor) << 8) | (patch))
-
-#define ADI_AXI_PCORE_VER_MAJOR(version) (((version) >> 16) & 0xff)
-#define ADI_AXI_PCORE_VER_MINOR(version) (((version) >> 8) & 0xff)
-#define ADI_AXI_PCORE_VER_PATCH(version) ((version) & 0xff)
-
-#endif /* ADI_AXI_COMMON_H_ */
diff --git a/include/linux/fprobe.h b/include/linux/fprobe.h
index 702099f08929..7964db96e41a 100644
--- a/include/linux/fprobe.h
+++ b/include/linux/fprobe.h
@@ -94,6 +94,7 @@ int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num);
int register_fprobe_syms(struct fprobe *fp, const char **syms, int num);
int unregister_fprobe(struct fprobe *fp);
bool fprobe_is_registered(struct fprobe *fp);
+int fprobe_count_ips_from_filter(const char *filter, const char *notfilter);
#else
static inline int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter)
{
@@ -115,6 +116,10 @@ static inline bool fprobe_is_registered(struct fprobe *fp)
{
return false;
}
+static inline int fprobe_count_ips_from_filter(const char *filter, const char *notfilter)
+{
+ return -EOPNOTSUPP;
+}
#endif
/**
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 2ec4807d4ea8..d7ab4f96d705 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -200,12 +200,12 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/*
* The two FMODE_NONOTIFY* define which fsnotify events should not be generated
- * for a file. These are the possible values of (f->f_mode &
- * FMODE_FSNOTIFY_MASK) and their meaning:
+ * for an open file. These are the possible values of
+ * (f->f_mode & FMODE_FSNOTIFY_MASK) and their meaning:
*
* FMODE_NONOTIFY - suppress all (incl. non-permission) events.
* FMODE_NONOTIFY_PERM - suppress permission (incl. pre-content) events.
- * FMODE_NONOTIFY | FMODE_NONOTIFY_PERM - suppress only pre-content events.
+ * FMODE_NONOTIFY | FMODE_NONOTIFY_PERM - suppress only FAN_ACCESS_PERM.
*/
#define FMODE_FSNOTIFY_MASK \
(FMODE_NONOTIFY | FMODE_NONOTIFY_PERM)
@@ -213,13 +213,13 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define FMODE_FSNOTIFY_NONE(mode) \
((mode & FMODE_FSNOTIFY_MASK) == FMODE_NONOTIFY)
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
-#define FMODE_FSNOTIFY_PERM(mode) \
+#define FMODE_FSNOTIFY_HSM(mode) \
((mode & FMODE_FSNOTIFY_MASK) == 0 || \
(mode & FMODE_FSNOTIFY_MASK) == (FMODE_NONOTIFY | FMODE_NONOTIFY_PERM))
-#define FMODE_FSNOTIFY_HSM(mode) \
+#define FMODE_FSNOTIFY_ACCESS_PERM(mode) \
((mode & FMODE_FSNOTIFY_MASK) == 0)
#else
-#define FMODE_FSNOTIFY_PERM(mode) 0
+#define FMODE_FSNOTIFY_ACCESS_PERM(mode) 0
#define FMODE_FSNOTIFY_HSM(mode) 0
#endif
@@ -526,7 +526,7 @@ struct address_space {
/*
* On most architectures that alignment is already the case; but
* must be enforced here for CRIS, to let the least significant bit
- * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON.
+ * of struct folio's "mapping" pointer be used for FOLIO_MAPPING_ANON.
*/
/* XArray tags, for tagging dirty and writeback pages in the pagecache. */
@@ -1043,6 +1043,7 @@ struct fown_struct {
* and so were/are genuinely "ahead". Start next readahead when
* the first of these pages is accessed.
* @ra_pages: Maximum size of a readahead request, copied from the bdi.
+ * @order: Preferred folio order used for most recent readahead.
* @mmap_miss: How many mmap accesses missed in the page cache.
* @prev_pos: The last byte in the most recent read request.
*
@@ -1054,7 +1055,8 @@ struct file_ra_state {
unsigned int size;
unsigned int async_size;
unsigned int ra_pages;
- unsigned int mmap_miss;
+ unsigned short order;
+ unsigned short mmap_miss;
loff_t prev_pos;
};
@@ -3756,9 +3758,14 @@ void setattr_copy(struct mnt_idmap *, struct inode *inode,
extern int file_update_time(struct file *file);
+static inline bool file_is_dax(const struct file *file)
+{
+ return file && IS_DAX(file->f_mapping->host);
+}
+
static inline bool vma_is_dax(const struct vm_area_struct *vma)
{
- return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
+ return file_is_dax(vma->vm_file);
}
static inline bool vma_is_fsdax(struct vm_area_struct *vma)
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 454d8e466958..28a9cb13fbfa 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -129,7 +129,7 @@ static inline int fsnotify_file(struct file *file, __u32 mask)
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
-void file_set_fsnotify_mode_from_watchers(struct file *file);
+int fsnotify_open_perm_and_set_mode(struct file *file);
/*
* fsnotify_file_area_perm - permission hook before access to file range
@@ -147,9 +147,6 @@ static inline int fsnotify_file_area_perm(struct file *file, int perm_mask,
if (!(perm_mask & (MAY_READ | MAY_WRITE | MAY_ACCESS)))
return 0;
- if (likely(!FMODE_FSNOTIFY_PERM(file->f_mode)))
- return 0;
-
/*
* read()/write() and other types of access generate pre-content events.
*/
@@ -160,7 +157,8 @@ static inline int fsnotify_file_area_perm(struct file *file, int perm_mask,
return ret;
}
- if (!(perm_mask & MAY_READ))
+ if (!(perm_mask & MAY_READ) ||
+ likely(!FMODE_FSNOTIFY_ACCESS_PERM(file->f_mode)))
return 0;
/*
@@ -208,28 +206,10 @@ static inline int fsnotify_file_perm(struct file *file, int perm_mask)
return fsnotify_file_area_perm(file, perm_mask, NULL, 0);
}
-/*
- * fsnotify_open_perm - permission hook before file open
- */
-static inline int fsnotify_open_perm(struct file *file)
-{
- int ret;
-
- if (likely(!FMODE_FSNOTIFY_PERM(file->f_mode)))
- return 0;
-
- if (file->f_flags & __FMODE_EXEC) {
- ret = fsnotify_path(&file->f_path, FS_OPEN_EXEC_PERM);
- if (ret)
- return ret;
- }
-
- return fsnotify_path(&file->f_path, FS_OPEN_PERM);
-}
-
#else
-static inline void file_set_fsnotify_mode_from_watchers(struct file *file)
+static inline int fsnotify_open_perm_and_set_mode(struct file *file)
{
+ return 0;
}
static inline int fsnotify_file_area_perm(struct file *file, int perm_mask,
@@ -253,11 +233,6 @@ static inline int fsnotify_file_perm(struct file *file, int perm_mask)
{
return 0;
}
-
-static inline int fsnotify_open_perm(struct file *file)
-{
- return 0;
-}
#endif
/*
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index b672ca15f265..7ded7df6e9b5 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -1108,7 +1108,7 @@ static __always_inline unsigned long get_lock_parent_ip(void)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif
-#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_init(void);
#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
#define FTRACE_CALLSITE_SECTION "__patchable_function_entries"
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index be160e8d8bcb..5ebf26fcdcfa 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -423,9 +423,14 @@ static inline bool gfp_compaction_allowed(gfp_t gfp_mask)
extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);
#ifdef CONFIG_CONTIG_ALLOC
+
+typedef unsigned int __bitwise acr_flags_t;
+#define ACR_FLAGS_NONE ((__force acr_flags_t)0) // ordinary allocation request
+#define ACR_FLAGS_CMA ((__force acr_flags_t)BIT(0)) // allocate for CMA
+
/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range_noprof(unsigned long start, unsigned long end,
- unsigned migratetype, gfp_t gfp_mask);
+ acr_flags_t alloc_flags, gfp_t gfp_mask);
#define alloc_contig_range(...) alloc_hooks(alloc_contig_range_noprof(__VA_ARGS__))
extern struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
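Sketch of the call-site change implied above: the old migratetype argument becomes a typed flag, ACR_FLAGS_NONE for ordinary requests or ACR_FLAGS_CMA when allocating on behalf of CMA.

	static int grab_range(unsigned long start_pfn, unsigned long end_pfn)
	{
		/* was: alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE, GFP_KERNEL) */
		return alloc_contig_range(start_pfn, end_pfn, ACR_FLAGS_NONE, GFP_KERNEL);
	}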
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 568a9d8c749b..2cc4f1e4ea96 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -1216,7 +1216,11 @@ static inline void hid_hw_wait(struct hid_device *hdev)
/**
* hid_report_len - calculate the report length
*
- * @report: the report we want to know the length
+ * @report: the report whose length we want to know
+ *
+ * The length counts the report ID byte, but only if the ID is nonzero
+ * and therefore is included in the report. Reports whose ID is zero
+ * never include an ID byte.
*/
static inline u32 hid_report_len(struct hid_report *report)
{
@@ -1239,6 +1243,8 @@ void hid_quirks_exit(__u16 bus);
dev_notice(&(hid)->dev, fmt, ##__VA_ARGS__)
#define hid_warn(hid, fmt, ...) \
dev_warn(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_warn_ratelimited(hid, fmt, ...) \
+ dev_warn_ratelimited(&(hid)->dev, fmt, ##__VA_ARGS__)
#define hid_info(hid, fmt, ...) \
dev_info(&(hid)->dev, fmt, ##__VA_ARGS__)
#define hid_dbg(hid, fmt, ...) \
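Illustrative use of the clarified length rule (a sketch, not from this diff): the buffer sized by hid_report_len() covers the ID byte only when report->id is nonzero.

	static int read_report(struct hid_device *hdev, struct hid_report *report)
	{
		u32 len = hid_report_len(report);
		u8 *buf = kzalloc(len, GFP_KERNEL);
		int ret;

		if (!buf)
			return -ENOMEM;
		ret = hid_hw_raw_request(hdev, report->id, buf, len,
					 report->type, HID_REQ_GET_REPORT);
		kfree(buf);
		return ret;
	}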
diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
index 9a7683d79a4b..36053c3d6d64 100644
--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -195,7 +195,7 @@ static inline void *kmap_local_page_try_from_panic(struct page *page)
static inline void *kmap_local_folio(struct folio *folio, size_t offset)
{
- return page_address(&folio->page) + offset;
+ return folio_address(folio) + offset;
}
static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index e48d7f27b0b9..6234f316468c 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -292,12 +292,6 @@ static inline void zero_user_segment(struct page *page,
zero_user_segments(page, start, end, 0, 0);
}
-static inline void zero_user(struct page *page,
- unsigned start, unsigned size)
-{
- zero_user_segments(page, start, start + size, 0, 0);
-}
-
#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
static inline void copy_user_highpage(struct page *to, struct page *from,
@@ -688,10 +682,4 @@ static inline void folio_release_kmap(struct folio *folio, void *addr)
kunmap_local(addr);
folio_put(folio);
}
-
-static inline void unmap_and_put_page(struct page *page, void *addr)
-{
- folio_release_kmap(page_folio(page), addr);
-}
-
#endif /* _LINUX_HIGHMEM_H */
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
index 99fcf65d575f..0c4c84b8c3be 100644
--- a/include/linux/hisi_acc_qm.h
+++ b/include/linux/hisi_acc_qm.h
@@ -556,9 +556,9 @@ int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
struct hisi_acc_sgl_pool;
struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool,
- u32 index, dma_addr_t *hw_sgl_dma);
+ u32 index, dma_addr_t *hw_sgl_dma, enum dma_data_direction dir);
void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
- struct hisi_acc_hw_sgl *hw_sgl);
+ struct hisi_acc_hw_sgl *hw_sgl, enum dma_data_direction dir);
struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
u32 count, u32 sge_nr);
void hisi_acc_free_sgl_pool(struct device *dev,
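Callers now pass the DMA direction explicitly to both map and unmap; a minimal sketch, assuming a TO_DEVICE job and an ERR_PTR-style return from the map helper.

	static int map_job(struct device *dev, struct scatterlist *sgl,
			   struct hisi_acc_sgl_pool *pool, u32 idx)
	{
		struct hisi_acc_hw_sgl *hw_sgl;
		dma_addr_t hw_sgl_dma;

		hw_sgl = hisi_acc_sg_buf_map_to_hw_sgl(dev, sgl, pool, idx,
						       &hw_sgl_dma, DMA_TO_DEVICE);
		if (IS_ERR(hw_sgl))
			return PTR_ERR(hw_sgl);
		/* ... submit the job using hw_sgl_dma ... */
		hisi_acc_sg_buf_unmap(dev, sgl, hw_sgl, DMA_TO_DEVICE);
		return 0;
	}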
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 2f190c90192d..7748489fde1b 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -37,8 +37,10 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
pmd_t *pmd, unsigned long addr, pgprot_t newprot,
unsigned long cp_flags);
-vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
-vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
+vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn,
+ bool write);
+vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, unsigned long pfn,
+ bool write);
vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
bool write);
vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
@@ -261,7 +263,7 @@ static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
}
unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
- unsigned long vm_flags,
+ vm_flags_t vm_flags,
unsigned long tva_flags,
unsigned long orders);
@@ -282,7 +284,7 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
*/
static inline
unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
- unsigned long vm_flags,
+ vm_flags_t vm_flags,
unsigned long tva_flags,
unsigned long orders)
{
@@ -317,7 +319,7 @@ struct thpsize {
(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
static inline bool vma_thp_disabled(struct vm_area_struct *vma,
- unsigned long vm_flags)
+ vm_flags_t vm_flags)
{
/*
* Explicitly disabled through madvise or prctl, or some
@@ -400,8 +402,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
#define split_huge_pmd(__vma, __pmd, __address) \
do { \
pmd_t *____pmd = (__pmd); \
- if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd) \
- || pmd_devmap(*____pmd)) \
+ if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)) \
__split_huge_pmd(__vma, __pmd, __address, \
false); \
} while (0)
@@ -426,16 +427,14 @@ change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
#define split_huge_pud(__vma, __pud, __address) \
do { \
pud_t *____pud = (__pud); \
- if (pud_trans_huge(*____pud) \
- || pud_devmap(*____pud)) \
+ if (pud_trans_huge(*____pud)) \
__split_huge_pud(__vma, __pud, __address); \
} while (0)
-int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
+int hugepage_madvise(struct vm_area_struct *vma, vm_flags_t *vm_flags,
int advice);
-int madvise_collapse(struct vm_area_struct *vma,
- struct vm_area_struct **prev,
- unsigned long start, unsigned long end);
+int madvise_collapse(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, bool *lock_dropped);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
unsigned long end, struct vm_area_struct *next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
@@ -450,7 +449,7 @@ static inline int is_swap_pmd(pmd_t pmd)
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma)
{
- if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
+ if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd))
return __pmd_trans_huge_lock(pmd, vma);
else
return NULL;
@@ -458,7 +457,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
struct vm_area_struct *vma)
{
- if (pud_trans_huge(*pud) || pud_devmap(*pud))
+ if (pud_trans_huge(*pud))
return __pud_trans_huge_lock(pud, vma);
else
return NULL;
@@ -473,9 +472,6 @@ static inline bool folio_test_pmd_mappable(struct folio *folio)
return folio_order(folio) >= HPAGE_PMD_ORDER;
}
-struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
- pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
-
vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);
extern struct folio *huge_zero_folio;
@@ -486,9 +482,14 @@ static inline bool is_huge_zero_folio(const struct folio *folio)
return READ_ONCE(huge_zero_folio) == folio;
}
+static inline bool is_huge_zero_pfn(unsigned long pfn)
+{
+ return READ_ONCE(huge_zero_pfn) == (pfn & ~(HPAGE_PMD_NR - 1));
+}
+
static inline bool is_huge_zero_pmd(pmd_t pmd)
{
- return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
+ return pmd_present(pmd) && is_huge_zero_pfn(pmd_pfn(pmd));
}
struct folio *mm_get_huge_zero_folio(struct mm_struct *mm);
@@ -524,7 +525,7 @@ static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
}
static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
- unsigned long vm_flags,
+ vm_flags_t vm_flags,
unsigned long tva_flags,
unsigned long orders)
{
@@ -593,14 +594,14 @@ static inline bool unmap_huge_pmd_locked(struct vm_area_struct *vma,
do { } while (0)
static inline int hugepage_madvise(struct vm_area_struct *vma,
- unsigned long *vm_flags, int advice)
+ vm_flags_t *vm_flags, int advice)
{
return -EINVAL;
}
static inline int madvise_collapse(struct vm_area_struct *vma,
- struct vm_area_struct **prev,
- unsigned long start, unsigned long end)
+ unsigned long start,
+ unsigned long end, bool *lock_dropped)
{
return -EINVAL;
}
@@ -636,6 +637,11 @@ static inline bool is_huge_zero_folio(const struct folio *folio)
return false;
}
+static inline bool is_huge_zero_pfn(unsigned long pfn)
+{
+ return false;
+}
+
static inline bool is_huge_zero_pmd(pmd_t pmd)
{
return false;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 42f374e828a2..526d27e88b3b 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -149,7 +149,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
uffd_flags_t flags,
struct folio **foliop);
#endif /* CONFIG_USERFAULTFD */
-bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
+long hugetlb_reserve_pages(struct inode *inode, long from, long to,
struct vm_area_struct *vma,
vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
@@ -359,12 +359,6 @@ static inline void hugetlb_show_meminfo_node(int nid)
{
}
-static inline int prepare_hugepage_range(struct file *file,
- unsigned long addr, unsigned long len)
-{
- return -EINVAL;
-}
-
static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
}
@@ -396,13 +390,6 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
return 0;
}
-static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
- unsigned long addr, unsigned long end,
- unsigned long floor, unsigned long ceiling)
-{
- BUG();
-}
-
#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
struct vm_area_struct *dst_vma,
@@ -740,6 +727,11 @@ extern unsigned int default_hstate_idx;
#define default_hstate (hstates[default_hstate_idx])
+static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
+{
+ return HUGETLBFS_SB(inode->i_sb)->spool;
+}
+
static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
{
return folio->_hugetlb_subpool;
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 996be3c2cff0..e5a2096e022e 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -9,7 +9,7 @@
* Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
* Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (c) 2018 - 2024 Intel Corporation
+ * Copyright (c) 2018 - 2025 Intel Corporation
*/
#ifndef LINUX_IEEE80211_H
@@ -2825,11 +2825,12 @@ static inline bool ieee80211_he_capa_size_ok(const u8 *data, u8 len)
#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000
#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000
-#define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0
-#define IEEE80211_6GHZ_CTRL_REG_SP_AP 1
-#define IEEE80211_6GHZ_CTRL_REG_VLP_AP 2
-#define IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP 3
-#define IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP 4
+#define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0
+#define IEEE80211_6GHZ_CTRL_REG_SP_AP 1
+#define IEEE80211_6GHZ_CTRL_REG_VLP_AP 2
+#define IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP 3
+#define IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP_OLD 4
+#define IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP 8
/**
* struct ieee80211_he_6ghz_oper - HE 6 GHz operation Information field
@@ -2847,13 +2848,31 @@ struct ieee80211_he_6ghz_oper {
#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_80MHZ 2
#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ 3
#define IEEE80211_HE_6GHZ_OPER_CTRL_DUP_BEACON 0x4
-#define IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO 0x38
+#define IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO 0x78
u8 control;
u8 ccfs0;
u8 ccfs1;
u8 minrate;
} __packed;
+/**
+ * enum ieee80211_reg_conn_bits - represents Regulatory connectivity field bits.
+ *
+ * This enumeration defines bit flags used to represent regulatory connectivity
+ * field bits.
+ *
+ * @IEEE80211_REG_CONN_LPI_VALID: Indicates whether the LPI bit is valid.
+ * @IEEE80211_REG_CONN_LPI_VALUE: Represents the value of the LPI bit.
+ * @IEEE80211_REG_CONN_SP_VALID: Indicates whether the SP bit is valid.
+ * @IEEE80211_REG_CONN_SP_VALUE: Represents the value of the SP bit.
+ */
+enum ieee80211_reg_conn_bits {
+ IEEE80211_REG_CONN_LPI_VALID = BIT(0),
+ IEEE80211_REG_CONN_LPI_VALUE = BIT(1),
+ IEEE80211_REG_CONN_SP_VALID = BIT(2),
+ IEEE80211_REG_CONN_SP_VALUE = BIT(3),
+};
+
/* transmit power interpretation type of transmit power envelope element */
enum ieee80211_tx_power_intrpt_type {
IEEE80211_TPE_LOCAL_EIRP,
@@ -3835,6 +3854,7 @@ enum ieee80211_eid_ext {
WLAN_EID_EXT_FILS_PUBLIC_KEY = 12,
WLAN_EID_EXT_FILS_NONCE = 13,
WLAN_EID_EXT_FUTURE_CHAN_GUIDANCE = 14,
+ WLAN_EID_EXT_DH_PARAMETER = 32,
WLAN_EID_EXT_HE_CAPABILITY = 35,
WLAN_EID_EXT_HE_OPERATION = 36,
WLAN_EID_EXT_UORA = 37,
@@ -3858,6 +3878,8 @@ enum ieee80211_eid_ext {
WLAN_EID_EXT_EHT_CAPABILITY = 108,
WLAN_EID_EXT_TID_TO_LINK_MAPPING = 109,
WLAN_EID_EXT_BANDWIDTH_INDICATION = 135,
+ WLAN_EID_EXT_KNOWN_STA_IDENTIFCATION = 136,
+ WLAN_EID_EXT_NON_AP_STA_REG_CON = 137,
};
/* Action category code */
@@ -3995,6 +4017,16 @@ enum ieee80211_s1g_actioncode {
WLAN_S1G_TWT_INFORMATION = 11,
};
+/* Radio measurement action codes as defined in IEEE 802.11-2024 - Table 9-470 */
+enum ieee80211_radio_measurement_actioncode {
+ WLAN_RM_ACTION_RADIO_MEASUREMENT_REQUEST = 0,
+ WLAN_RM_ACTION_RADIO_MEASUREMENT_REPORT = 1,
+ WLAN_RM_ACTION_LINK_MEASUREMENT_REQUEST = 2,
+ WLAN_RM_ACTION_LINK_MEASUREMENT_REPORT = 3,
+ WLAN_RM_ACTION_NEIGHBOR_REPORT_REQUEST = 4,
+ WLAN_RM_ACTION_NEIGHBOR_REPORT_RESPONSE = 5,
+};
+
#define IEEE80211_WEP_IV_LEN 4
#define IEEE80211_WEP_ICV_LEN 4
#define IEEE80211_CCMP_HDR_LEN 8
@@ -5344,6 +5376,13 @@ static inline u16 ieee80211_mle_get_mld_capa_op(const u8 *data)
return get_unaligned_le16(common);
}
+/* Defined in Figure 9-1074t in P802.11be_D7.0 */
+#define IEEE80211_EHT_ML_EXT_MLD_CAPA_OP_PARAM_UPDATE 0x0001
+#define IEEE80211_EHT_ML_EXT_MLD_CAPA_OP_RECO_MAX_LINKS_MASK 0x001e
+#define IEEE80211_EHT_ML_EXT_MLD_CAPA_NSTR_UPDATE 0x0020
+#define IEEE80211_EHT_ML_EXT_MLD_CAPA_EMLSR_ENA_ON_ONE_LINK 0x0040
+#define IEEE80211_EHT_ML_EXT_MLD_CAPA_BTM_MLD_RECO_MULTI_AP 0x0080
+
/**
* ieee80211_mle_get_ext_mld_capa_op - returns the extended MLD capabilities
* and operations.
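A small sketch of one plausible way to consume the new Regulatory connectivity bits, treating a power mode as allowed only when both its valid and value bits are set; the raw field value is assumed to come from the parsed element.

	static void parse_reg_conn(u8 conn, bool *lpi_ok, bool *sp_ok)
	{
		*lpi_ok = (conn & IEEE80211_REG_CONN_LPI_VALID) &&
			  (conn & IEEE80211_REG_CONN_LPI_VALUE);
		*sp_ok = (conn & IEEE80211_REG_CONN_SP_VALID) &&
			 (conn & IEEE80211_REG_CONN_SP_VALUE);
	}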
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index cdc684e04a2f..ce97d891cf72 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -191,8 +191,6 @@ struct team {
const struct header_ops *header_ops_cache;
- struct mutex lock; /* used for overall locking, e.g. port lists write */
-
/*
* List of enabled ports and their count
*/
@@ -223,7 +221,6 @@ struct team {
atomic_t count_pending;
struct delayed_work dw;
} mcast_rejoin;
- struct lock_class_key team_lock_key;
long mode_priv[TEAM_MODE_PRIV_LONGS];
};
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index 043d442994b0..80166eb62f41 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -19,11 +19,6 @@ struct tun_msg_ctl {
void *ptr;
};
-struct tun_xdp_hdr {
- int buflen;
- struct virtio_net_hdr gso;
-};
-
#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
struct socket *tun_get_socket(struct file *);
struct ptr_ring *tun_get_tx_ring(struct file *file);
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 38456b42cdb5..15e01935d3fa 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -79,11 +79,6 @@ static inline struct vlan_ethhdr *skb_vlan_eth_hdr(const struct sk_buff *skb)
/* found in socket.c */
extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
-static inline bool is_vlan_dev(const struct net_device *dev)
-{
- return dev->priv_flags & IFF_802_1Q_VLAN;
-}
-
#define skb_vlan_tag_present(__skb) (!!(__skb)->vlan_all)
#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci)
#define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK)
@@ -136,7 +131,7 @@ struct vlan_pcpu_stats {
u32 tx_dropped;
};
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev,
__be16 vlan_proto, u16 vlan_id);
@@ -200,6 +195,11 @@ struct vlan_dev_priv {
#endif
};
+static inline bool is_vlan_dev(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_802_1Q_VLAN;
+}
+
static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
{
return netdev_priv(dev);
@@ -237,6 +237,11 @@ extern void vlan_vids_del_by_dev(struct net_device *dev,
extern bool vlan_uses_dev(const struct net_device *dev);
#else
+static inline bool is_vlan_dev(const struct net_device *dev)
+{
+ return false;
+}
+
static inline struct net_device *
__vlan_find_dev_deep_rcu(struct net_device *real_dev,
__be16 vlan_proto, u16 vlan_id)
@@ -254,19 +259,19 @@ vlan_for_each(struct net_device *dev,
static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
- BUG();
+ WARN_ON_ONCE(1);
return NULL;
}
static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
{
- BUG();
+ WARN_ON_ONCE(1);
return 0;
}
static inline __be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
- BUG();
+ WARN_ON_ONCE(1);
return 0;
}
diff --git a/include/linux/in6.h b/include/linux/in6.h
index 0777a21cbf86..403f926d33d8 100644
--- a/include/linux/in6.h
+++ b/include/linux/in6.h
@@ -18,6 +18,13 @@
#include <uapi/linux/in6.h>
+/* Large enough to hold both sockaddr_in and sockaddr_in6. */
+struct sockaddr_inet {
+ unsigned short sa_family;
+ char sa_data[sizeof(struct sockaddr_in6) -
+ sizeof(unsigned short)];
+};
+
/* IPv6 Wildcard Address (::) and Loopback Address (::1) defined in RFC2553
* NOTE: Be aware the IN6ADDR_* constants and in6addr_* externals are defined
* in network byte order, not in host byte order as are the IPv4 equivalents
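A short sketch of the intended use: one stack buffer that fits a peer address of either family.

	static int peer_family(struct socket *sock)
	{
		struct sockaddr_inet peer;	/* fits sockaddr_in and sockaddr_in6 */
		int len;

		len = kernel_getpeername(sock, (struct sockaddr *)&peer);
		if (len < 0)
			return len;
		return peer.sa_family;		/* AF_INET or AF_INET6 */
	}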
diff --git a/include/linux/init.h b/include/linux/init.h
index c65a050d52a7..a60d32d227ee 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -51,7 +51,7 @@
discard it in modules) */
#define __init __section(".init.text") __cold __latent_entropy \
__noinitretpoline \
- __no_sanitize_coverage
+ __no_kstack_erase
#define __initdata __section(".init.data")
#define __initconst __section(".init.rodata")
#define __exitdata __section(".exit.data")
diff --git a/include/linux/intel_dg_nvm_aux.h b/include/linux/intel_dg_nvm_aux.h
new file mode 100644
index 000000000000..625d46a6b96e
--- /dev/null
+++ b/include/linux/intel_dg_nvm_aux.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2019-2025, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_DG_NVM_AUX_H__
+#define __INTEL_DG_NVM_AUX_H__
+
+#include <linux/auxiliary_bus.h>
+#include <linux/container_of.h>
+#include <linux/ioport.h>
+#include <linux/types.h>
+
+#define INTEL_DG_NVM_REGIONS 13
+
+struct intel_dg_nvm_region {
+ const char *name;
+};
+
+struct intel_dg_nvm_dev {
+ struct auxiliary_device aux_dev;
+ bool writable_override;
+ bool non_posted_erase;
+ struct resource bar;
+ struct resource bar2;
+ const struct intel_dg_nvm_region *regions;
+};
+
+#define auxiliary_dev_to_intel_dg_nvm_dev(auxiliary_dev) \
+ container_of(auxiliary_dev, struct intel_dg_nvm_dev, aux_dev)
+
+#endif /* __INTEL_DG_NVM_AUX_H__ */
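A hedged sketch of how a consumer on the auxiliary bus might recover the device data in probe; the driver name and region handling are hypothetical.

	static int my_nvm_probe(struct auxiliary_device *aux_dev,
				const struct auxiliary_device_id *id)
	{
		struct intel_dg_nvm_dev *nvm =
			auxiliary_dev_to_intel_dg_nvm_dev(aux_dev);

		if (!nvm->regions)
			return -ENODEV;
		/* nvm->bar / nvm->bar2 describe the MMIO windows to map */
		return 0;
	}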
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 156732807994..c30d12e16473 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -14,6 +14,7 @@
#include <linux/err.h>
#include <linux/of.h>
#include <linux/iova_bitmap.h>
+#include <uapi/linux/iommufd.h>
#define IOMMU_READ (1 << 0)
#define IOMMU_WRITE (1 << 1)
@@ -558,12 +559,52 @@ iommu_copy_struct_from_full_user_array(void *kdst, size_t kdst_entry_size,
}
/**
+ * __iommu_copy_struct_to_user - Report iommu driver specific user space data
+ * @dst_data: Pointer to a struct iommu_user_data for user space data location
+ * @src_data: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @data_type: The data type of the @src_data. Must match with @dst_data.type
+ * @data_len: Length of current user data structure, i.e. sizeof(struct _src)
+ * @min_len: Initial length of user data structure for backward compatibility.
+ * This should be offsetofend using the last member in the user data
+ * struct that was initially added to include/uapi/linux/iommufd.h
+ */
+static inline int
+__iommu_copy_struct_to_user(const struct iommu_user_data *dst_data,
+ void *src_data, unsigned int data_type,
+ size_t data_len, size_t min_len)
+{
+ if (WARN_ON(!dst_data || !src_data))
+ return -EINVAL;
+ if (dst_data->type != data_type)
+ return -EINVAL;
+ if (dst_data->len < min_len || data_len < dst_data->len)
+ return -EINVAL;
+ return copy_struct_to_user(dst_data->uptr, dst_data->len, src_data,
+ data_len, NULL);
+}
+
+/**
+ * iommu_copy_struct_to_user - Report iommu driver specific user space data
+ * @user_data: Pointer to a struct iommu_user_data for user space data location
+ * @ksrc: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @data_type: The data type of the @ksrc. Must match with @user_data->type
+ * @min_last: The last member of the data structure @ksrc points in the initial
+ * version.
+ * Return 0 for success, otherwise -error.
+ */
+#define iommu_copy_struct_to_user(user_data, ksrc, data_type, min_last) \
+ __iommu_copy_struct_to_user(user_data, ksrc, data_type, sizeof(*ksrc), \
+ offsetofend(typeof(*ksrc), min_last))
+
+/**
* struct iommu_ops - iommu ops and capabilities
* @capable: check capability
* @hw_info: report iommu hardware information. The data buffer returned by this
* op is allocated in the iommu driver and freed by the caller after
- * use. The information type is one of enum iommu_hw_info_type defined
- * in include/uapi/linux/iommufd.h.
+ * use. @type can input a requested type and output a supported type.
+ * Driver should reject an unsupported data @type input
* @domain_alloc: Do not use in new drivers
* @domain_alloc_identity: allocate an IDENTITY domain. Drivers should prefer to
* use identity_domain instead. This should only be used
@@ -596,15 +637,16 @@ iommu_copy_struct_from_full_user_array(void *kdst, size_t kdst_entry_size,
* - IOMMU_DOMAIN_DMA: must use a dma domain
* - 0: use the default setting
* @default_domain_ops: the default ops for domains
- * @viommu_alloc: Allocate an iommufd_viommu on a physical IOMMU instance behind
- * the @dev, as the set of virtualization resources shared/passed
- * to user space IOMMU instance. And associate it with a nesting
- * @parent_domain. The @viommu_type must be defined in the header
- * include/uapi/linux/iommufd.h
- * It is required to call iommufd_viommu_alloc() helper for
- * a bundled allocation of the core and the driver structures,
- * using the given @ictx pointer.
- * @pgsize_bitmap: bitmap of all possible supported page sizes
+ * @get_viommu_size: Get the size of a driver-level vIOMMU structure for a given
+ * @dev corresponding to @viommu_type. Driver should return 0
+ * if vIOMMU isn't supported accordingly. It is required for
+ * driver to use the VIOMMU_STRUCT_SIZE macro to sanitize the
+ * driver-level vIOMMU structure related to the core one
+ * @viommu_init: Init the driver-level struct of an iommufd_viommu on a physical
+ * IOMMU instance @viommu->iommu_dev, as the set of virtualization
+ * resources shared/passed to user space IOMMU instance. Associate
+ * it with a nesting @parent_domain. It is required for driver to
+ * set @viommu->ops pointing to its own viommu_ops
* @owner: Driver module providing these ops
* @identity_domain: An always available, always attachable identity
* translation.
@@ -620,7 +662,8 @@ iommu_copy_struct_from_full_user_array(void *kdst, size_t kdst_entry_size,
*/
struct iommu_ops {
bool (*capable)(struct device *dev, enum iommu_cap);
- void *(*hw_info)(struct device *dev, u32 *length, u32 *type);
+ void *(*hw_info)(struct device *dev, u32 *length,
+ enum iommu_hw_info_type *type);
/* Domain allocation and freeing by the iommu driver */
#if IS_ENABLED(CONFIG_FSL_PAMU)
@@ -654,12 +697,13 @@ struct iommu_ops {
int (*def_domain_type)(struct device *dev);
- struct iommufd_viommu *(*viommu_alloc)(
- struct device *dev, struct iommu_domain *parent_domain,
- struct iommufd_ctx *ictx, unsigned int viommu_type);
+ size_t (*get_viommu_size)(struct device *dev,
+ enum iommu_viommu_type viommu_type);
+ int (*viommu_init)(struct iommufd_viommu *viommu,
+ struct iommu_domain *parent_domain,
+ const struct iommu_user_data *user_data);
const struct iommu_domain_ops *default_domain_ops;
- unsigned long pgsize_bitmap;
struct module *owner;
struct iommu_domain *identity_domain;
struct iommu_domain *blocked_domain;
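A hedged sketch of the new reporting helper; struct iommu_hw_info_mydrv, IOMMU_HW_INFO_TYPE_MYDRV and its flags member are hypothetical stand-ins for a driver's real uapi definitions.

	static int mydrv_report_info(const struct iommu_user_data *dst)
	{
		struct iommu_hw_info_mydrv info = {
			.flags = 0,		/* hypothetical member */
		};

		/* min_last = flags: the last member of the initial uapi version */
		return iommu_copy_struct_to_user(dst, &info,
						 IOMMU_HW_INFO_TYPE_MYDRV, flags);
	}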
diff --git a/include/linux/iommufd.h b/include/linux/iommufd.h
index 34b6e6ca4bfa..6e7efe83bc5d 100644
--- a/include/linux/iommufd.h
+++ b/include/linux/iommufd.h
@@ -37,6 +37,7 @@ enum iommufd_object_type {
IOMMUFD_OBJ_VIOMMU,
IOMMUFD_OBJ_VDEVICE,
IOMMUFD_OBJ_VEVENTQ,
+ IOMMUFD_OBJ_HW_QUEUE,
#ifdef CONFIG_IOMMUFD_TEST
IOMMUFD_OBJ_SELFTEST,
#endif
@@ -45,7 +46,13 @@ enum iommufd_object_type {
/* Base struct for all objects with a userspace ID handle. */
struct iommufd_object {
- refcount_t shortterm_users;
+ /*
+ * Destroy will sleep and wait for wait_cnt to go to zero. This allows
+ * concurrent users of the ID to reliably avoid causing a spurious
+ * destroy failure. Incrementing this count should either be short
+ * lived or be revoked and blocked during pre_destroy().
+ */
+ refcount_t wait_cnt;
refcount_t users;
enum iommufd_object_type type;
unsigned int id;
@@ -101,7 +108,36 @@ struct iommufd_viommu {
struct list_head veventqs;
struct rw_semaphore veventqs_rwsem;
- unsigned int type;
+ enum iommu_viommu_type type;
+};
+
+struct iommufd_vdevice {
+ struct iommufd_object obj;
+ struct iommufd_viommu *viommu;
+ struct iommufd_device *idev;
+
+ /*
+ * Virtual device ID per vIOMMU, e.g. vSID of ARM SMMUv3, vDeviceID of
+ * AMD IOMMU, and vRID of Intel VT-d
+ */
+ u64 virt_id;
+
+ /* Clean up all driver-specific parts of an iommufd_vdevice */
+ void (*destroy)(struct iommufd_vdevice *vdev);
+};
+
+struct iommufd_hw_queue {
+ struct iommufd_object obj;
+ struct iommufd_viommu *viommu;
+ struct iommufd_access *access;
+
+ u64 base_addr; /* in guest physical address space */
+ size_t length;
+
+ enum iommu_hw_queue_type type;
+
+ /* Clean up all driver-specific parts of an iommufd_hw_queue */
+ void (*destroy)(struct iommufd_hw_queue *hw_queue);
};
/**
@@ -120,6 +156,30 @@ struct iommufd_viommu {
* array->entry_num to report the number of handled requests.
* The data structure of the array entry must be defined in
* include/uapi/linux/iommufd.h
+ * @vdevice_size: Size of the driver-defined vDEVICE structure per this vIOMMU
+ * @vdevice_init: Initialize the driver-level structure of a vDEVICE object, or
+ * related HW procedure. @vdev is already initialized by iommufd
+ * core: vdev->dev and vdev->viommu pointers; vdev->id carries a
+ * per-vIOMMU virtual ID (refer to struct iommu_vdevice_alloc in
+ * include/uapi/linux/iommufd.h)
+ * If driver has a deinit function to revert what vdevice_init op
+ * does, it should set it to the @vdev->destroy function pointer
+ * @get_hw_queue_size: Get the size of a driver-defined HW queue structure for a
+ * given @viommu corresponding to @queue_type. Driver should
+ * return 0 if the HW queue isn't supported accordingly. It is
+ * required for driver to use the HW_QUEUE_STRUCT_SIZE macro
+ * to sanitize the driver-level HW queue structure related
+ * to the core one
+ * @hw_queue_init_phys: Initialize the driver-level structure of a HW queue that
+ * is initialized with its core-level structure that holds
+ * all the info about a guest queue memory.
+ * Driver providing this op indicates that HW accesses the
+ * guest queue memory via physical addresses.
+ * @index carries the logical HW QUEUE ID per vIOMMU in a
+ * guest VM, for a multi-queue model. @base_addr_pa carries
+ * the physical location of the guest queue
+ * If driver has a deinit function to revert what this op
+ * does, it should set it to the @hw_queue->destroy pointer
*/
struct iommufd_viommu_ops {
void (*destroy)(struct iommufd_viommu *viommu);
@@ -128,6 +188,13 @@ struct iommufd_viommu_ops {
const struct iommu_user_data *user_data);
int (*cache_invalidate)(struct iommufd_viommu *viommu,
struct iommu_user_data_array *array);
+ const size_t vdevice_size;
+ int (*vdevice_init)(struct iommufd_vdevice *vdev);
+ size_t (*get_hw_queue_size)(struct iommufd_viommu *viommu,
+ enum iommu_hw_queue_type queue_type);
+ /* AMD's HW will add hw_queue_init simply using @hw_queue->base_addr */
+ int (*hw_queue_init_phys)(struct iommufd_hw_queue *hw_queue, u32 index,
+ phys_addr_t base_addr_pa);
};
#if IS_ENABLED(CONFIG_IOMMUFD)
@@ -171,8 +238,9 @@ static inline void iommufd_access_unpin_pages(struct iommufd_access *access,
{
}
-static inline int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
- void *data, size_t len, unsigned int flags)
+static inline int iommufd_access_rw(struct iommufd_access *access,
+ unsigned long iova, void *data, size_t len,
+ unsigned int flags)
{
return -EOPNOTSUPP;
}
@@ -189,9 +257,16 @@ static inline int iommufd_vfio_compat_set_no_iommu(struct iommufd_ctx *ictx)
#endif /* CONFIG_IOMMUFD */
#if IS_ENABLED(CONFIG_IOMMUFD_DRIVER_CORE)
-struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
- size_t size,
- enum iommufd_object_type type);
+int _iommufd_object_depend(struct iommufd_object *obj_dependent,
+ struct iommufd_object *obj_depended);
+void _iommufd_object_undepend(struct iommufd_object *obj_dependent,
+ struct iommufd_object *obj_depended);
+int _iommufd_alloc_mmap(struct iommufd_ctx *ictx, struct iommufd_object *owner,
+ phys_addr_t mmio_addr, size_t length,
+ unsigned long *offset);
+void _iommufd_destroy_mmap(struct iommufd_ctx *ictx,
+ struct iommufd_object *owner, unsigned long offset);
+struct device *iommufd_vdevice_to_device(struct iommufd_vdevice *vdev);
struct device *iommufd_viommu_find_dev(struct iommufd_viommu *viommu,
unsigned long vdev_id);
int iommufd_viommu_get_vdev_id(struct iommufd_viommu *viommu,
@@ -200,11 +275,36 @@ int iommufd_viommu_report_event(struct iommufd_viommu *viommu,
enum iommu_veventq_type type, void *event_data,
size_t data_len);
#else /* !CONFIG_IOMMUFD_DRIVER_CORE */
-static inline struct iommufd_object *
-_iommufd_object_alloc(struct iommufd_ctx *ictx, size_t size,
- enum iommufd_object_type type)
+static inline int _iommufd_object_depend(struct iommufd_object *obj_dependent,
+ struct iommufd_object *obj_depended)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void
+_iommufd_object_undepend(struct iommufd_object *obj_dependent,
+ struct iommufd_object *obj_depended)
+{
+}
+
+static inline int _iommufd_alloc_mmap(struct iommufd_ctx *ictx,
+ struct iommufd_object *owner,
+ phys_addr_t mmio_addr, size_t length,
+ unsigned long *offset)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void _iommufd_destroy_mmap(struct iommufd_ctx *ictx,
+ struct iommufd_object *owner,
+ unsigned long offset)
{
- return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline struct device *
+iommufd_vdevice_to_device(struct iommufd_vdevice *vdev)
+{
+ return NULL;
}
static inline struct device *
@@ -228,21 +328,73 @@ static inline int iommufd_viommu_report_event(struct iommufd_viommu *viommu,
}
#endif /* CONFIG_IOMMUFD_DRIVER_CORE */
+#define VIOMMU_STRUCT_SIZE(drv_struct, member) \
+ (sizeof(drv_struct) + \
+ BUILD_BUG_ON_ZERO(offsetof(drv_struct, member)) + \
+ BUILD_BUG_ON_ZERO(!__same_type(struct iommufd_viommu, \
+ ((drv_struct *)NULL)->member)))
+
+#define VDEVICE_STRUCT_SIZE(drv_struct, member) \
+ (sizeof(drv_struct) + \
+ BUILD_BUG_ON_ZERO(offsetof(drv_struct, member)) + \
+ BUILD_BUG_ON_ZERO(!__same_type(struct iommufd_vdevice, \
+ ((drv_struct *)NULL)->member)))
+
+#define HW_QUEUE_STRUCT_SIZE(drv_struct, member) \
+ (sizeof(drv_struct) + \
+ BUILD_BUG_ON_ZERO(offsetof(drv_struct, member)) + \
+ BUILD_BUG_ON_ZERO(!__same_type(struct iommufd_hw_queue, \
+ ((drv_struct *)NULL)->member)))
+
/*
- * Helpers for IOMMU driver to allocate driver structures that will be freed by
- * the iommufd core. The free op will be called prior to freeing the memory.
+ * Helpers for IOMMU driver to build/destroy a dependency between two sibling
+ * structures created by one of the allocators above
*/
-#define iommufd_viommu_alloc(ictx, drv_struct, member, viommu_ops) \
+#define iommufd_hw_queue_depend(dependent, depended, member) \
({ \
- drv_struct *ret; \
+ int ret = -EINVAL; \
\
- static_assert(__same_type(struct iommufd_viommu, \
- ((drv_struct *)NULL)->member)); \
- static_assert(offsetof(drv_struct, member.obj) == 0); \
- ret = (drv_struct *)_iommufd_object_alloc( \
- ictx, sizeof(drv_struct), IOMMUFD_OBJ_VIOMMU); \
- if (!IS_ERR(ret)) \
- ret->member.ops = viommu_ops; \
+ static_assert(__same_type(struct iommufd_hw_queue, \
+ dependent->member)); \
+ static_assert(__same_type(typeof(*dependent), *depended)); \
+ if (!WARN_ON_ONCE(dependent->member.viommu != \
+ depended->member.viommu)) \
+ ret = _iommufd_object_depend(&dependent->member.obj, \
+ &depended->member.obj); \
ret; \
})
+
+#define iommufd_hw_queue_undepend(dependent, depended, member) \
+ ({ \
+ static_assert(__same_type(struct iommufd_hw_queue, \
+ dependent->member)); \
+ static_assert(__same_type(typeof(*dependent), *depended)); \
+ WARN_ON_ONCE(dependent->member.viommu != \
+ depended->member.viommu); \
+ _iommufd_object_undepend(&dependent->member.obj, \
+ &depended->member.obj); \
+ })
+
+/*
+ * Helpers for IOMMU driver to alloc/destroy an mmapable area for a structure.
+ *
+ * To support an mmappable MMIO region, kernel driver must first register it to
+ * iommufd core to allocate an @offset, during a driver-structure initialization
+ * (e.g. viommu_init op). Then, it should report to user space this @offset and
+ * the @length of the MMIO region for mmap syscall.
+ */
+static inline int iommufd_viommu_alloc_mmap(struct iommufd_viommu *viommu,
+ phys_addr_t mmio_addr,
+ size_t length,
+ unsigned long *offset)
+{
+ return _iommufd_alloc_mmap(viommu->ictx, &viommu->obj, mmio_addr,
+ length, offset);
+}
+
+static inline void iommufd_viommu_destroy_mmap(struct iommufd_viommu *viommu,
+ unsigned long offset)
+{
+ _iommufd_destroy_mmap(viommu->ictx, &viommu->obj, offset);
+}
#endif
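A hedged sketch of the new allocation flow for a vIOMMU driver: the core sizes the structure via get_viommu_size(), allocates it, then calls viommu_init(). "struct my_viommu", my_viommu_ops, MY_VIOMMU_TYPE and MY_MMIO_BASE are hypothetical placeholders; the embedded core structure must sit at offset 0, as VIOMMU_STRUCT_SIZE() enforces.

	struct my_viommu {
		struct iommufd_viommu core;	/* must be the first member */
		unsigned long mmap_offset;
	};

	static const struct iommufd_viommu_ops my_viommu_ops;	/* ops body elided */

	static size_t my_get_viommu_size(struct device *dev,
					 enum iommu_viommu_type viommu_type)
	{
		if (viommu_type != MY_VIOMMU_TYPE)	/* placeholder uapi type */
			return 0;
		return VIOMMU_STRUCT_SIZE(struct my_viommu, core);
	}

	static int my_viommu_init(struct iommufd_viommu *viommu,
				  struct iommu_domain *parent_domain,
				  const struct iommu_user_data *user_data)
	{
		struct my_viommu *mv = container_of(viommu, struct my_viommu, core);

		viommu->ops = &my_viommu_ops;	/* required, per the kerneldoc above */
		/* Optional: publish one MMIO page; the driver then reports the
		 * returned offset and the length to user space for mmap(). */
		return iommufd_viommu_alloc_mmap(viommu, MY_MMIO_BASE, PAGE_SIZE,
						 &mv->mmap_offset);
	}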
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 5aeeed22f35b..bc6ec2959173 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -17,6 +17,7 @@ struct ipv6_devconf {
__s32 hop_limit;
__s32 mtu6;
__s32 forwarding;
+ __s32 force_forwarding;
__s32 disable_policy;
__s32 proxy_ndp;
__cacheline_group_end(ipv6_devconf_read_txrx);
@@ -156,6 +157,7 @@ struct inet6_skb_parm {
#define IP6SKB_SEG6 256
#define IP6SKB_FAKEJUMBO 512
#define IP6SKB_MULTIPATH 1024
+#define IP6SKB_MCROUTE 2048
};
#if defined(CONFIG_NET_L3_MASTER_DEV)
diff --git a/include/linux/irq-entry-common.h b/include/linux/irq-entry-common.h
index 8af374331900..4fb2f93d82ed 100644
--- a/include/linux/irq-entry-common.h
+++ b/include/linux/irq-entry-common.h
@@ -49,6 +49,22 @@ static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) {}
#endif
/**
+ * arch_in_rcu_eqs - Architecture specific check for RCU extended quiescent
+ * states.
+ *
+ * Returns: true if the CPU is potentially in an RCU EQS, false otherwise.
+ *
+ * Architectures only need to define this if threads other than the idle thread
+ * may have an interruptible EQS. This does not need to handle idle threads. It
+ * is safe to over-estimate at the cost of redundant RCU management work.
+ *
+ * Invoked from irqentry_enter()
+ */
+#ifndef arch_in_rcu_eqs
+static __always_inline bool arch_in_rcu_eqs(void) { return false; }
+#endif
+
+/**
* enter_from_user_mode - Establish state when coming from user mode
*
* Syscall/interrupt entry disables interrupts, but user mode is traced as
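arch_in_rcu_eqs() above is a weak default; an architecture where non-idle threads can sit in an interruptible RCU EQS would provide its own definition in an asm header before this one is included. A hedged sketch, with the per-CPU flag invented for illustration:

/* hypothetical arch/<arch>/include/asm override */
#define arch_in_rcu_eqs arch_in_rcu_eqs
static __always_inline bool arch_in_rcu_eqs(void)
{
	/* Over-estimating is safe; it only costs redundant RCU management work. */
	return __this_cpu_read(arch_fw_eqs_active);
}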
diff --git a/include/linux/irqbypass.h b/include/linux/irqbypass.h
index 9bdb2a781841..ede1fa938152 100644
--- a/include/linux/irqbypass.h
+++ b/include/linux/irqbypass.h
@@ -10,6 +10,7 @@
#include <linux/list.h>
+struct eventfd_ctx;
struct irq_bypass_consumer;
/*
@@ -18,20 +19,22 @@ struct irq_bypass_consumer;
* The IRQ bypass manager is a simple set of lists and callbacks that allows
* IRQ producers (ex. physical interrupt sources) to be matched to IRQ
* consumers (ex. virtualization hardware that allows IRQ bypass or offload)
- * via a shared token (ex. eventfd_ctx). Producers and consumers register
- * independently. When a token match is found, the optional @stop callback
- * will be called for each participant. The pair will then be connected via
- * the @add_* callbacks, and finally the optional @start callback will allow
- * any final coordination. When either participant is unregistered, the
- * process is repeated using the @del_* callbacks in place of the @add_*
- * callbacks. Match tokens must be unique per producer/consumer, 1:N pairings
- * are not supported.
+ * via a shared eventfd_ctx. Producers and consumers register independently.
+ * When a producer and consumer are paired, i.e. an eventfd match is found, the
+ * optional @stop callback will be called for each participant. The pair will
+ * then be connected via the @add_* callbacks, and finally the optional @start
+ * callback will allow any final coordination. When either participant is
+ * unregistered, the process is repeated using the @del_* callbacks in place of
+ * the @add_* callbacks. eventfds must be unique per producer/consumer, 1:N
+ * pairings are not supported.
*/
+struct irq_bypass_consumer;
+
/**
* struct irq_bypass_producer - IRQ bypass producer definition
- * @node: IRQ bypass manager private list management
- * @token: opaque token to match between producer and consumer (non-NULL)
+ * @eventfd: eventfd context used to match producers and consumers
+ * @consumer: The connected consumer (NULL if no connection)
* @irq: Linux IRQ number for the producer device
* @add_consumer: Connect the IRQ producer to an IRQ consumer (optional)
* @del_consumer: Disconnect the IRQ producer from an IRQ consumer (optional)
@@ -43,8 +46,8 @@ struct irq_bypass_consumer;
* for a physical device assigned to a VM.
*/
struct irq_bypass_producer {
- struct list_head node;
- void *token;
+ struct eventfd_ctx *eventfd;
+ struct irq_bypass_consumer *consumer;
int irq;
int (*add_consumer)(struct irq_bypass_producer *,
struct irq_bypass_consumer *);
@@ -56,8 +59,8 @@ struct irq_bypass_producer {
/**
* struct irq_bypass_consumer - IRQ bypass consumer definition
- * @node: IRQ bypass manager private list management
- * @token: opaque token to match between producer and consumer (non-NULL)
+ * @eventfd: eventfd context used to match producers and consumers
+ * @producer: The connected producer (NULL if no connection)
* @add_producer: Connect the IRQ consumer to an IRQ producer
* @del_producer: Disconnect the IRQ consumer from an IRQ producer
* @stop: Perform any quiesce operations necessary prior to add/del (optional)
@@ -69,8 +72,9 @@ struct irq_bypass_producer {
* portions of the interrupt handling to the VM.
*/
struct irq_bypass_consumer {
- struct list_head node;
- void *token;
+ struct eventfd_ctx *eventfd;
+ struct irq_bypass_producer *producer;
+
int (*add_producer)(struct irq_bypass_consumer *,
struct irq_bypass_producer *);
void (*del_producer)(struct irq_bypass_consumer *,
@@ -79,9 +83,11 @@ struct irq_bypass_consumer {
void (*start)(struct irq_bypass_consumer *);
};
-int irq_bypass_register_producer(struct irq_bypass_producer *);
-void irq_bypass_unregister_producer(struct irq_bypass_producer *);
-int irq_bypass_register_consumer(struct irq_bypass_consumer *);
-void irq_bypass_unregister_consumer(struct irq_bypass_consumer *);
+int irq_bypass_register_producer(struct irq_bypass_producer *producer,
+ struct eventfd_ctx *eventfd, int irq);
+void irq_bypass_unregister_producer(struct irq_bypass_producer *producer);
+int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer,
+ struct eventfd_ctx *eventfd);
+void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer);
#endif /* IRQBYPASS_H */
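Since the opaque token is replaced by an explicit eventfd_ctx, the match key (and the producer's IRQ) now travels through the registration call rather than a struct field. A hedged sketch of pairing a producer with a consumer on the same eventfd; the callbacks and names are placeholders:

static struct irq_bypass_producer bypass_prod = {
	.add_consumer	= my_add_consumer,	/* optional */
	.del_consumer	= my_del_consumer,	/* optional */
};

static struct irq_bypass_consumer bypass_cons = {
	.add_producer	= my_add_producer,
	.del_producer	= my_del_producer,
};

static int attach_bypass(struct eventfd_ctx *trigger, int host_irq)
{
	int err;

	err = irq_bypass_register_producer(&bypass_prod, trigger, host_irq);
	if (err)
		return err;

	/* Matched against the producer via the shared eventfd */
	err = irq_bypass_register_consumer(&bypass_cons, trigger);
	if (err)
		irq_bypass_unregister_producer(&bypass_prod);
	return err;
}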
diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h
index 7f1f11a5e4e4..0b0887099fd7 100644
--- a/include/linux/irqchip/arm-gic-v4.h
+++ b/include/linux/irqchip/arm-gic-v4.h
@@ -146,7 +146,7 @@ int its_commit_vpe(struct its_vpe *vpe);
int its_invall_vpe(struct its_vpe *vpe);
int its_map_vlpi(int irq, struct its_vlpi_map *map);
int its_get_vlpi(int irq, struct its_vlpi_map *map);
-int its_unmap_vlpi(int irq);
+void its_unmap_vlpi(int irq);
int its_prop_update_vlpi(int irq, u8 config, bool inv);
int its_prop_update_vsgi(int irq, u8 priority, bool group);
diff --git a/include/linux/irqchip/arm-gic-v5.h b/include/linux/irqchip/arm-gic-v5.h
new file mode 100644
index 000000000000..68ddcdb1cec5
--- /dev/null
+++ b/include/linux/irqchip/arm-gic-v5.h
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2025 ARM Limited, All Rights Reserved.
+ */
+#ifndef __LINUX_IRQCHIP_ARM_GIC_V5_H
+#define __LINUX_IRQCHIP_ARM_GIC_V5_H
+
+#include <linux/iopoll.h>
+
+#include <asm/cacheflush.h>
+#include <asm/smp.h>
+#include <asm/sysreg.h>
+
+#define GICV5_IPIS_PER_CPU MAX_IPI
+
+/*
+ * INTID handling
+ */
+#define GICV5_HWIRQ_ID GENMASK(23, 0)
+#define GICV5_HWIRQ_TYPE GENMASK(31, 29)
+#define GICV5_HWIRQ_INTID GENMASK_ULL(31, 0)
+
+#define GICV5_HWIRQ_TYPE_PPI UL(0x1)
+#define GICV5_HWIRQ_TYPE_LPI UL(0x2)
+#define GICV5_HWIRQ_TYPE_SPI UL(0x3)
+
+/*
+ * Tables attributes
+ */
+#define GICV5_NO_READ_ALLOC 0b0
+#define GICV5_READ_ALLOC 0b1
+#define GICV5_NO_WRITE_ALLOC 0b0
+#define GICV5_WRITE_ALLOC 0b1
+
+#define GICV5_NON_CACHE 0b00
+#define GICV5_WB_CACHE 0b01
+#define GICV5_WT_CACHE 0b10
+
+#define GICV5_NON_SHARE 0b00
+#define GICV5_OUTER_SHARE 0b10
+#define GICV5_INNER_SHARE 0b11
+
+/*
+ * IRS registers and tables structures
+ */
+#define GICV5_IRS_IDR1 0x0004
+#define GICV5_IRS_IDR2 0x0008
+#define GICV5_IRS_IDR5 0x0014
+#define GICV5_IRS_IDR6 0x0018
+#define GICV5_IRS_IDR7 0x001c
+#define GICV5_IRS_CR0 0x0080
+#define GICV5_IRS_CR1 0x0084
+#define GICV5_IRS_SYNCR 0x00c0
+#define GICV5_IRS_SYNC_STATUSR 0x00c4
+#define GICV5_IRS_SPI_SELR 0x0108
+#define GICV5_IRS_SPI_CFGR 0x0114
+#define GICV5_IRS_SPI_STATUSR 0x0118
+#define GICV5_IRS_PE_SELR 0x0140
+#define GICV5_IRS_PE_STATUSR 0x0144
+#define GICV5_IRS_PE_CR0 0x0148
+#define GICV5_IRS_IST_BASER 0x0180
+#define GICV5_IRS_IST_CFGR 0x0190
+#define GICV5_IRS_IST_STATUSR 0x0194
+#define GICV5_IRS_MAP_L2_ISTR 0x01c0
+
+#define GICV5_IRS_IDR1_PRIORITY_BITS GENMASK(22, 20)
+#define GICV5_IRS_IDR1_IAFFID_BITS GENMASK(19, 16)
+
+#define GICV5_IRS_IDR1_PRIORITY_BITS_1BITS 0b000
+#define GICV5_IRS_IDR1_PRIORITY_BITS_2BITS 0b001
+#define GICV5_IRS_IDR1_PRIORITY_BITS_3BITS 0b010
+#define GICV5_IRS_IDR1_PRIORITY_BITS_4BITS 0b011
+#define GICV5_IRS_IDR1_PRIORITY_BITS_5BITS 0b100
+
+#define GICV5_IRS_IDR2_ISTMD_SZ GENMASK(19, 15)
+#define GICV5_IRS_IDR2_ISTMD BIT(14)
+#define GICV5_IRS_IDR2_IST_L2SZ GENMASK(13, 11)
+#define GICV5_IRS_IDR2_IST_LEVELS BIT(10)
+#define GICV5_IRS_IDR2_MIN_LPI_ID_BITS GENMASK(9, 6)
+#define GICV5_IRS_IDR2_LPI BIT(5)
+#define GICV5_IRS_IDR2_ID_BITS GENMASK(4, 0)
+
+#define GICV5_IRS_IDR5_SPI_RANGE GENMASK(24, 0)
+#define GICV5_IRS_IDR6_SPI_IRS_RANGE GENMASK(24, 0)
+#define GICV5_IRS_IDR7_SPI_BASE GENMASK(23, 0)
+
+#define GICV5_IRS_IST_L2SZ_SUPPORT_4KB(r) FIELD_GET(BIT(11), (r))
+#define GICV5_IRS_IST_L2SZ_SUPPORT_16KB(r) FIELD_GET(BIT(12), (r))
+#define GICV5_IRS_IST_L2SZ_SUPPORT_64KB(r) FIELD_GET(BIT(13), (r))
+
+#define GICV5_IRS_CR0_IDLE BIT(1)
+#define GICV5_IRS_CR0_IRSEN BIT(0)
+
+#define GICV5_IRS_CR1_VPED_WA BIT(15)
+#define GICV5_IRS_CR1_VPED_RA BIT(14)
+#define GICV5_IRS_CR1_VMD_WA BIT(13)
+#define GICV5_IRS_CR1_VMD_RA BIT(12)
+#define GICV5_IRS_CR1_VPET_WA BIT(11)
+#define GICV5_IRS_CR1_VPET_RA BIT(10)
+#define GICV5_IRS_CR1_VMT_WA BIT(9)
+#define GICV5_IRS_CR1_VMT_RA BIT(8)
+#define GICV5_IRS_CR1_IST_WA BIT(7)
+#define GICV5_IRS_CR1_IST_RA BIT(6)
+#define GICV5_IRS_CR1_IC GENMASK(5, 4)
+#define GICV5_IRS_CR1_OC GENMASK(3, 2)
+#define GICV5_IRS_CR1_SH GENMASK(1, 0)
+
+#define GICV5_IRS_SYNCR_SYNC BIT(31)
+
+#define GICV5_IRS_SYNC_STATUSR_IDLE BIT(0)
+
+#define GICV5_IRS_SPI_STATUSR_V BIT(1)
+#define GICV5_IRS_SPI_STATUSR_IDLE BIT(0)
+
+#define GICV5_IRS_SPI_SELR_ID GENMASK(23, 0)
+
+#define GICV5_IRS_SPI_CFGR_TM BIT(0)
+
+#define GICV5_IRS_PE_SELR_IAFFID GENMASK(15, 0)
+
+#define GICV5_IRS_PE_STATUSR_V BIT(1)
+#define GICV5_IRS_PE_STATUSR_IDLE BIT(0)
+
+#define GICV5_IRS_PE_CR0_DPS BIT(0)
+
+#define GICV5_IRS_IST_STATUSR_IDLE BIT(0)
+
+#define GICV5_IRS_IST_CFGR_STRUCTURE BIT(16)
+#define GICV5_IRS_IST_CFGR_ISTSZ GENMASK(8, 7)
+#define GICV5_IRS_IST_CFGR_L2SZ GENMASK(6, 5)
+#define GICV5_IRS_IST_CFGR_LPI_ID_BITS GENMASK(4, 0)
+
+#define GICV5_IRS_IST_CFGR_STRUCTURE_LINEAR 0b0
+#define GICV5_IRS_IST_CFGR_STRUCTURE_TWO_LEVEL 0b1
+
+#define GICV5_IRS_IST_CFGR_ISTSZ_4 0b00
+#define GICV5_IRS_IST_CFGR_ISTSZ_8 0b01
+#define GICV5_IRS_IST_CFGR_ISTSZ_16 0b10
+
+#define GICV5_IRS_IST_CFGR_L2SZ_4K 0b00
+#define GICV5_IRS_IST_CFGR_L2SZ_16K 0b01
+#define GICV5_IRS_IST_CFGR_L2SZ_64K 0b10
+
+#define GICV5_IRS_IST_BASER_ADDR_MASK GENMASK_ULL(55, 6)
+#define GICV5_IRS_IST_BASER_VALID BIT_ULL(0)
+
+#define GICV5_IRS_MAP_L2_ISTR_ID GENMASK(23, 0)
+
+#define GICV5_ISTL1E_VALID BIT_ULL(0)
+
+#define GICV5_ISTL1E_L2_ADDR_MASK GENMASK_ULL(55, 12)
+
+/*
+ * ITS registers and tables structures
+ */
+#define GICV5_ITS_IDR1 0x0004
+#define GICV5_ITS_IDR2 0x0008
+#define GICV5_ITS_CR0 0x0080
+#define GICV5_ITS_CR1 0x0084
+#define GICV5_ITS_DT_BASER 0x00c0
+#define GICV5_ITS_DT_CFGR 0x00d0
+#define GICV5_ITS_DIDR 0x0100
+#define GICV5_ITS_EIDR 0x0108
+#define GICV5_ITS_INV_EVENTR 0x010c
+#define GICV5_ITS_INV_DEVICER 0x0110
+#define GICV5_ITS_STATUSR 0x0120
+#define GICV5_ITS_SYNCR 0x0140
+#define GICV5_ITS_SYNC_STATUSR 0x0148
+
+#define GICV5_ITS_IDR1_L2SZ GENMASK(10, 8)
+#define GICV5_ITS_IDR1_ITT_LEVELS BIT(7)
+#define GICV5_ITS_IDR1_DT_LEVELS BIT(6)
+#define GICV5_ITS_IDR1_DEVICEID_BITS GENMASK(5, 0)
+
+#define GICV5_ITS_IDR1_L2SZ_SUPPORT_4KB(r) FIELD_GET(BIT(8), (r))
+#define GICV5_ITS_IDR1_L2SZ_SUPPORT_16KB(r) FIELD_GET(BIT(9), (r))
+#define GICV5_ITS_IDR1_L2SZ_SUPPORT_64KB(r) FIELD_GET(BIT(10), (r))
+
+#define GICV5_ITS_IDR2_XDMN_EVENTs GENMASK(6, 5)
+#define GICV5_ITS_IDR2_EVENTID_BITS GENMASK(4, 0)
+
+#define GICV5_ITS_CR0_IDLE BIT(1)
+#define GICV5_ITS_CR0_ITSEN BIT(0)
+
+#define GICV5_ITS_CR1_ITT_RA BIT(7)
+#define GICV5_ITS_CR1_DT_RA BIT(6)
+#define GICV5_ITS_CR1_IC GENMASK(5, 4)
+#define GICV5_ITS_CR1_OC GENMASK(3, 2)
+#define GICV5_ITS_CR1_SH GENMASK(1, 0)
+
+#define GICV5_ITS_DT_CFGR_STRUCTURE BIT(16)
+#define GICV5_ITS_DT_CFGR_L2SZ GENMASK(7, 6)
+#define GICV5_ITS_DT_CFGR_DEVICEID_BITS GENMASK(5, 0)
+
+#define GICV5_ITS_DT_BASER_ADDR_MASK GENMASK_ULL(55, 3)
+
+#define GICV5_ITS_INV_DEVICER_I BIT(31)
+#define GICV5_ITS_INV_DEVICER_EVENTID_BITS GENMASK(5, 1)
+#define GICV5_ITS_INV_DEVICER_L1 BIT(0)
+
+#define GICV5_ITS_DIDR_DEVICEID GENMASK_ULL(31, 0)
+
+#define GICV5_ITS_EIDR_EVENTID GENMASK(15, 0)
+
+#define GICV5_ITS_INV_EVENTR_I BIT(31)
+#define GICV5_ITS_INV_EVENTR_ITT_L2SZ GENMASK(2, 1)
+#define GICV5_ITS_INV_EVENTR_L1 BIT(0)
+
+#define GICV5_ITS_STATUSR_IDLE BIT(0)
+
+#define GICV5_ITS_SYNCR_SYNC BIT_ULL(63)
+#define GICV5_ITS_SYNCR_SYNCALL BIT_ULL(32)
+#define GICV5_ITS_SYNCR_DEVICEID GENMASK_ULL(31, 0)
+
+#define GICV5_ITS_SYNC_STATUSR_IDLE BIT(0)
+
+#define GICV5_DTL1E_VALID BIT_ULL(0)
+/* Note that there is no shift for the address by design */
+#define GICV5_DTL1E_L2_ADDR_MASK GENMASK_ULL(55, 3)
+#define GICV5_DTL1E_SPAN GENMASK_ULL(63, 60)
+
+#define GICV5_DTL2E_VALID BIT_ULL(0)
+#define GICV5_DTL2E_ITT_L2SZ GENMASK_ULL(2, 1)
+/* Note that there is no shift for the address by design */
+#define GICV5_DTL2E_ITT_ADDR_MASK GENMASK_ULL(55, 3)
+#define GICV5_DTL2E_ITT_DSWE BIT_ULL(57)
+#define GICV5_DTL2E_ITT_STRUCTURE BIT_ULL(58)
+#define GICV5_DTL2E_EVENT_ID_BITS GENMASK_ULL(63, 59)
+
+#define GICV5_ITTL1E_VALID BIT_ULL(0)
+/* Note that there is no shift for the address by design */
+#define GICV5_ITTL1E_L2_ADDR_MASK GENMASK_ULL(55, 3)
+#define GICV5_ITTL1E_SPAN GENMASK_ULL(63, 60)
+
+#define GICV5_ITTL2E_LPI_ID GENMASK_ULL(23, 0)
+#define GICV5_ITTL2E_DAC GENMASK_ULL(29, 28)
+#define GICV5_ITTL2E_VIRTUAL BIT_ULL(30)
+#define GICV5_ITTL2E_VALID BIT_ULL(31)
+#define GICV5_ITTL2E_VM_ID GENMASK_ULL(47, 32)
+
+#define GICV5_ITS_DT_ITT_CFGR_L2SZ_4k 0b00
+#define GICV5_ITS_DT_ITT_CFGR_L2SZ_16k 0b01
+#define GICV5_ITS_DT_ITT_CFGR_L2SZ_64k 0b10
+
+#define GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR 0
+#define GICV5_ITS_DT_ITT_CFGR_STRUCTURE_TWO_LEVEL 1
+
+#define GICV5_ITS_HWIRQ_DEVICE_ID GENMASK_ULL(31, 0)
+#define GICV5_ITS_HWIRQ_EVENT_ID GENMASK_ULL(63, 32)
+
+/*
+ * IWB registers
+ */
+#define GICV5_IWB_IDR0 0x0000
+#define GICV5_IWB_CR0 0x0080
+#define GICV5_IWB_WENABLE_STATUSR 0x00c0
+#define GICV5_IWB_WENABLER 0x2000
+#define GICV5_IWB_WTMR 0x4000
+
+#define GICV5_IWB_IDR0_INT_DOMS GENMASK(14, 11)
+#define GICV5_IWB_IDR0_IW_RANGE GENMASK(10, 0)
+
+#define GICV5_IWB_CR0_IDLE BIT(1)
+#define GICV5_IWB_CR0_IWBEN BIT(0)
+
+#define GICV5_IWB_WENABLE_STATUSR_IDLE BIT(0)
+
+/*
+ * Global Data structures and functions
+ */
+struct gicv5_chip_data {
+ struct fwnode_handle *fwnode;
+ struct irq_domain *ppi_domain;
+ struct irq_domain *spi_domain;
+ struct irq_domain *lpi_domain;
+ struct irq_domain *ipi_domain;
+ u32 global_spi_count;
+ u8 cpuif_pri_bits;
+ u8 cpuif_id_bits;
+ u8 irs_pri_bits;
+ struct {
+ __le64 *l1ist_addr;
+ u32 l2_size;
+ u8 l2_bits;
+ bool l2;
+ } ist;
+};
+
+extern struct gicv5_chip_data gicv5_global_data __read_mostly;
+
+struct gicv5_irs_chip_data {
+ struct list_head entry;
+ struct fwnode_handle *fwnode;
+ void __iomem *irs_base;
+ u32 flags;
+ u32 spi_min;
+ u32 spi_range;
+ raw_spinlock_t spi_config_lock;
+};
+
+static inline int gicv5_wait_for_op_s_atomic(void __iomem *addr, u32 offset,
+ const char *reg_s, u32 mask,
+ u32 *val)
+{
+ void __iomem *reg = addr + offset;
+ u32 tmp;
+ int ret;
+
+ ret = readl_poll_timeout_atomic(reg, tmp, tmp & mask, 1, 10 * USEC_PER_MSEC);
+ if (unlikely(ret == -ETIMEDOUT)) {
+ pr_err_ratelimited("%s timeout...\n", reg_s);
+ return ret;
+ }
+
+ if (val)
+ *val = tmp;
+
+ return 0;
+}
+
+static inline int gicv5_wait_for_op_s(void __iomem *addr, u32 offset,
+ const char *reg_s, u32 mask)
+{
+ void __iomem *reg = addr + offset;
+ u32 val;
+ int ret;
+
+ ret = readl_poll_timeout(reg, val, val & mask, 1, 10 * USEC_PER_MSEC);
+ if (unlikely(ret == -ETIMEDOUT)) {
+ pr_err_ratelimited("%s timeout...\n", reg_s);
+ return ret;
+ }
+
+ return 0;
+}
+
+#define gicv5_wait_for_op_atomic(base, reg, mask, val) \
+ gicv5_wait_for_op_s_atomic(base, reg, #reg, mask, val)
+
+#define gicv5_wait_for_op(base, reg, mask) \
+ gicv5_wait_for_op_s(base, reg, #reg, mask)
+
+void __init gicv5_init_lpi_domain(void);
+void __init gicv5_free_lpi_domain(void);
+
+int gicv5_irs_of_probe(struct device_node *parent);
+void gicv5_irs_remove(void);
+int gicv5_irs_enable(void);
+void gicv5_irs_its_probe(void);
+int gicv5_irs_register_cpu(int cpuid);
+int gicv5_irs_cpu_to_iaffid(int cpu_id, u16 *iaffid);
+struct gicv5_irs_chip_data *gicv5_irs_lookup_by_spi_id(u32 spi_id);
+int gicv5_spi_irq_set_type(struct irq_data *d, unsigned int type);
+int gicv5_irs_iste_alloc(u32 lpi);
+void gicv5_irs_syncr(void);
+
+struct gicv5_its_devtab_cfg {
+ union {
+ struct {
+ __le64 *devtab;
+ } linear;
+ struct {
+ __le64 *l1devtab;
+ __le64 **l2ptrs;
+ } l2;
+ };
+ u32 cfgr;
+};
+
+struct gicv5_its_itt_cfg {
+ union {
+ struct {
+ __le64 *itt;
+ unsigned int num_ents;
+ } linear;
+ struct {
+ __le64 *l1itt;
+ __le64 **l2ptrs;
+ unsigned int num_l1_ents;
+ u8 l2sz;
+ } l2;
+ };
+ u8 event_id_bits;
+ bool l2itt;
+};
+
+void gicv5_init_lpis(u32 max);
+void gicv5_deinit_lpis(void);
+
+int gicv5_alloc_lpi(void);
+void gicv5_free_lpi(u32 lpi);
+
+void __init gicv5_its_of_probe(struct device_node *parent);
+#endif
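To show how the pieces of this new header fit together: gicv5_wait_for_op() stringifies the register macro for the timeout message, so polling CR0.IDLE after enabling the IRS is a one-liner. A sketch only, reusing definitions from the header above:

static int example_irs_enable(struct gicv5_irs_chip_data *irs_data)
{
	u32 cr0 = FIELD_PREP(GICV5_IRS_CR0_IRSEN, 0x1);

	writel_relaxed(cr0, irs_data->irs_base + GICV5_IRS_CR0);

	/* Expands to gicv5_wait_for_op_s(base, GICV5_IRS_CR0,
	 * "GICV5_IRS_CR0", GICV5_IRS_CR0_IDLE) and polls for up to 10ms. */
	return gicv5_wait_for_op(irs_data->irs_base, GICV5_IRS_CR0,
				 GICV5_IRS_CR0_IDLE);
}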
diff --git a/include/linux/irqchip/arm-vgic-info.h b/include/linux/irqchip/arm-vgic-info.h
index a75b2c7de69d..ca1713fac6e3 100644
--- a/include/linux/irqchip/arm-vgic-info.h
+++ b/include/linux/irqchip/arm-vgic-info.h
@@ -15,6 +15,8 @@ enum gic_type {
GIC_V2,
/* Full GICv3, optionally with v2 compat */
GIC_V3,
+ /* Full GICv5, optionally with v3 compat */
+ GIC_V5,
};
struct gic_kvm_info {
@@ -34,6 +36,8 @@ struct gic_kvm_info {
bool has_v4_1;
/* Deactivation impaired, subpar stuff */
bool no_hw_deactivation;
+ /* v3 compat support (GICv5 hosts, only) */
+ bool has_gcie_v3_compat;
};
#ifdef CONFIG_KVM
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 266b5e5bb8ce..4a86e6b915dd 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -212,6 +212,9 @@ enum {
/* Address and data pair is mutable when irq_set_affinity() */
IRQ_DOMAIN_FLAG_MSI_IMMUTABLE = (1 << 11),
+ /* IRQ domain requires parent fwnode matching */
+ IRQ_DOMAIN_FLAG_FWNODE_PARENT = (1 << 12),
+
/*
* Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
* for implementation specific purposes and ignored by the
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 1cce1f6410a9..989315dabb86 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -373,9 +373,9 @@ ftrace_vprintk(const char *fmt, va_list ap)
static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
#endif /* CONFIG_TRACING */
-/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
-#ifdef CONFIG_FTRACE_MCOUNT_RECORD
-# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
+/* Rebuild everything on CONFIG_DYNAMIC_FTRACE */
+#ifdef CONFIG_DYNAMIC_FTRACE
+# define REBUILD_DUE_TO_DYNAMIC_FTRACE
#endif
/* Permissions on a sysfs file: you didn't miss the 0 prefix did you? */
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index b8d69cfbb58b..ff6120463745 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -12,7 +12,7 @@ extern int start_stop_khugepaged(void);
extern void __khugepaged_enter(struct mm_struct *mm);
extern void __khugepaged_exit(struct mm_struct *mm);
extern void khugepaged_enter_vma(struct vm_area_struct *vma,
- unsigned long vm_flags);
+ vm_flags_t vm_flags);
extern void khugepaged_min_free_kbytes_update(void);
extern bool current_is_khugepaged(void);
extern int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
@@ -37,7 +37,7 @@ static inline void khugepaged_exit(struct mm_struct *mm)
{
}
static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
- unsigned long vm_flags)
+ vm_flags_t vm_flags)
{
}
static inline int collapse_pte_mapped_thp(struct mm_struct *mm,
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index 68f69362d427..9a07c3215389 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -14,10 +14,7 @@
#include <linux/workqueue.h>
#include <linux/sysctl.h>
-#define KMOD_PATH_LEN 256
-
#ifdef CONFIG_MODULES
-extern char modprobe_path[]; /* for sysctl */
/* modprobe exit status on success, -ve on error. Return value
* usually useless though. */
extern __printf(2, 3)
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index d73095b5cd96..c17b955e7b0b 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -16,9 +16,9 @@
#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, int advice, unsigned long *vm_flags);
-
-void ksm_add_vma(struct vm_area_struct *vma);
+ unsigned long end, int advice, vm_flags_t *vm_flags);
+vm_flags_t ksm_vma_flags(const struct mm_struct *mm, const struct file *file,
+ vm_flags_t vm_flags);
int ksm_enable_merge_any(struct mm_struct *mm);
int ksm_disable_merge_any(struct mm_struct *mm);
int ksm_disable(struct mm_struct *mm);
@@ -97,8 +97,10 @@ bool ksm_process_mergeable(struct mm_struct *mm);
#else /* !CONFIG_KSM */
-static inline void ksm_add_vma(struct vm_area_struct *vma)
+static inline vm_flags_t ksm_vma_flags(const struct mm_struct *mm,
+ const struct file *file, vm_flags_t vm_flags)
{
+ return vm_flags;
}
static inline int ksm_disable(struct mm_struct *mm)
@@ -131,7 +133,7 @@ static inline void collect_procs_ksm(const struct folio *folio,
#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, int advice, unsigned long *vm_flags)
+ unsigned long end, int advice, vm_flags_t *vm_flags)
{
return 0;
}
diff --git a/include/linux/kvm_dirty_ring.h b/include/linux/kvm_dirty_ring.h
index da4d9b5f58f1..eb10d87adf7d 100644
--- a/include/linux/kvm_dirty_ring.h
+++ b/include/linux/kvm_dirty_ring.h
@@ -49,9 +49,10 @@ static inline int kvm_dirty_ring_alloc(struct kvm *kvm, struct kvm_dirty_ring *r
}
static inline int kvm_dirty_ring_reset(struct kvm *kvm,
- struct kvm_dirty_ring *ring)
+ struct kvm_dirty_ring *ring,
+ int *nr_entries_reset)
{
- return 0;
+ return -ENOENT;
}
static inline void kvm_dirty_ring_push(struct kvm_vcpu *vcpu,
@@ -77,17 +78,8 @@ bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm);
u32 kvm_dirty_ring_get_rsvd_entries(struct kvm *kvm);
int kvm_dirty_ring_alloc(struct kvm *kvm, struct kvm_dirty_ring *ring,
int index, u32 size);
-
-/*
- * called with kvm->slots_lock held, returns the number of
- * processed pages.
- */
-int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring);
-
-/*
- * returns =0: successfully pushed
- * <0: unable to push, need to wait
- */
+int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring,
+ int *nr_entries_reset);
void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset);
bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu);
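With the new prototype, the number of processed entries comes back through *nr_entries_reset while the return value carries only success/failure (the stub now returns -ENOENT). A hedged sketch of a caller iterating all vCPUs under kvm->slots_lock; the function name and error policy are assumptions:

static int example_reset_all_rings(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;
	int total = 0;

	lockdep_assert_held(&kvm->slots_lock);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		int nr = 0;
		int r = kvm_dirty_ring_reset(kvm, &vcpu->dirty_ring, &nr);

		if (r)
			return r;	/* assumption: fail the whole operation */
		total += nr;		/* entries whose dirty state was reset */
	}
	return total;
}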
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 3bde4fb5c6aa..15656b7fba6c 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -190,6 +190,7 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
+#define KVM_PIT_IRQ_SOURCE_ID 2
extern struct mutex kvm_lock;
extern struct list_head vm_list;
@@ -1022,16 +1023,12 @@ void kvm_unlock_all_vcpus(struct kvm *kvm);
void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);
-#ifdef __KVM_HAVE_IOAPIC
+#ifdef CONFIG_KVM_IOAPIC
void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
-void kvm_arch_post_irq_routing_update(struct kvm *kvm);
#else
static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
}
-static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
-{
-}
#endif
#ifdef CONFIG_HAVE_KVM_IRQCHIP
@@ -1693,24 +1690,6 @@ static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
return false;
}
#endif
-#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
-void kvm_arch_start_assignment(struct kvm *kvm);
-void kvm_arch_end_assignment(struct kvm *kvm);
-bool kvm_arch_has_assigned_device(struct kvm *kvm);
-#else
-static inline void kvm_arch_start_assignment(struct kvm *kvm)
-{
-}
-
-static inline void kvm_arch_end_assignment(struct kvm *kvm)
-{
-}
-
-static __always_inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
-{
- return false;
-}
-#endif
static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
{
@@ -1788,8 +1767,6 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm,
struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
struct kvm_irq_ack_notifier *kian);
-int kvm_request_irq_source_id(struct kvm *kvm);
-void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
/*
@@ -2406,6 +2383,8 @@ struct kvm_vcpu *kvm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
+struct kvm_kernel_irqfd;
+
bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
struct irq_bypass_producer *);
@@ -2413,10 +2392,9 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
struct irq_bypass_producer *);
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
-int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
- uint32_t guest_irq, bool set);
-bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *,
- struct kvm_kernel_irq_routing_entry *);
+void kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd,
+ struct kvm_kernel_irq_routing_entry *old,
+ struct kvm_kernel_irq_routing_entry *new);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */
#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
diff --git a/include/linux/kvm_irqfd.h b/include/linux/kvm_irqfd.h
index 8ad43692e3bb..ef8c134ded8a 100644
--- a/include/linux/kvm_irqfd.h
+++ b/include/linux/kvm_irqfd.h
@@ -55,10 +55,13 @@ struct kvm_kernel_irqfd {
/* Used for setup/shutdown */
struct eventfd_ctx *eventfd;
struct list_head list;
- poll_table pt;
struct work_struct shutdown;
struct irq_bypass_consumer consumer;
struct irq_bypass_producer *producer;
+
+ struct kvm_vcpu *irq_bypass_vcpu;
+ struct list_head vcpu_list;
+ void *irq_bypass_data;
};
#endif /* __LINUX_KVM_IRQFD_H */
diff --git a/include/linux/led-class-flash.h b/include/linux/led-class-flash.h
index 21ec856c36bc..775a96217518 100644
--- a/include/linux/led-class-flash.h
+++ b/include/linux/led-class-flash.h
@@ -197,7 +197,7 @@ int led_update_flash_brightness(struct led_classdev_flash *fled_cdev);
* @fled_cdev: the flash LED to set
* @timeout: the flash timeout to set it to
*
- * Set the flash strobe duration.
+ * Set the flash strobe timeout.
*
* Returns: 0 on success or negative error value on failure
*/
diff --git a/include/linux/leds.h b/include/linux/leds.h
index b3f0aa081064..b16b803cc1ac 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -294,7 +294,6 @@ void led_remove_lookup(struct led_lookup_data *led_lookup);
struct led_classdev *__must_check led_get(struct device *dev, char *con_id);
struct led_classdev *__must_check devm_led_get(struct device *dev, char *con_id);
-extern struct led_classdev *of_led_get(struct device_node *np, int index);
extern void led_put(struct led_classdev *led_cdev);
struct led_classdev *__must_check devm_of_led_get(struct device *dev,
int index);
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index e772aae71843..28f086c4a187 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -6,12 +6,12 @@
*/
#ifndef __LIBNVDIMM_H__
#define __LIBNVDIMM_H__
-#include <linux/kernel.h>
+
+#include <linux/io.h>
#include <linux/sizes.h>
+#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/uuid.h>
-#include <linux/spinlock.h>
-#include <linux/bio.h>
struct badrange_entry {
u64 start;
@@ -80,7 +80,9 @@ typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm *nvdimm, unsigned int cmd, void *buf,
unsigned int buf_len, int *cmd_rc);
+struct attribute_group;
struct device_node;
+struct module;
struct nvdimm_bus_descriptor {
const struct attribute_group **attr_groups;
unsigned long cmd_mask;
@@ -121,6 +123,8 @@ struct nd_mapping_desc {
int position;
};
+struct bio;
+struct resource;
struct nd_region;
struct nd_region_desc {
struct resource *res;
@@ -147,8 +151,6 @@ static inline void __iomem *devm_nvdimm_ioremap(struct device *dev,
return (void __iomem *) devm_nvdimm_memremap(dev, offset, size, 0);
}
-struct nvdimm_bus;
-
/*
* Note that separate bits for locked + unlocked are defined so that
* 'flags == 0' corresponds to an error / not-supported state.
@@ -238,6 +240,9 @@ struct nvdimm_fw_ops {
int (*arm)(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arg);
};
+struct kobject;
+struct nvdimm_bus;
+
void badrange_init(struct badrange *badrange);
int badrange_add(struct badrange *badrange, u64 addr, u64 length);
void badrange_forget(struct badrange *badrange, phys_addr_t start,
diff --git a/include/linux/llist.h b/include/linux/llist.h
index 27b17f64bcee..607b2360c938 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -83,7 +83,7 @@ static inline void init_llist_head(struct llist_head *list)
*/
static inline void init_llist_node(struct llist_node *node)
{
- node->next = node;
+ WRITE_ONCE(node->next, node);
}
/**
@@ -97,7 +97,7 @@ static inline void init_llist_node(struct llist_node *node)
*/
static inline bool llist_on_list(const struct llist_node *node)
{
- return node->next != node;
+ return READ_ONCE(node->next) != node;
}
/**
@@ -220,7 +220,7 @@ static inline bool llist_empty(const struct llist_head *head)
static inline struct llist_node *llist_next(struct llist_node *node)
{
- return node->next;
+ return READ_ONCE(node->next);
}
/**
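The READ_ONCE()/WRITE_ONCE() annotations above make the self-pointing "not on a list" marker safe to observe concurrently. A minimal sketch of the intended pattern; struct my_work is invented for illustration:

struct my_work {
	struct llist_node node;
	/* ... payload ... */
};

static void my_work_init(struct my_work *w)
{
	init_llist_node(&w->node);	/* node->next == node: not queued */
}

static bool my_work_queued(struct my_work *w)
{
	/* May race with llist_add()/init_llist_node() on other CPUs */
	return llist_on_list(&w->node);
}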
diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
index 16a2ee4f8310..2ba846419524 100644
--- a/include/linux/local_lock.h
+++ b/include/linux/local_lock.h
@@ -13,13 +13,13 @@
* local_lock - Acquire a per CPU local lock
* @lock: The lock variable
*/
-#define local_lock(lock) __local_lock(lock)
+#define local_lock(lock) __local_lock(this_cpu_ptr(lock))
/**
* local_lock_irq - Acquire a per CPU local lock and disable interrupts
* @lock: The lock variable
*/
-#define local_lock_irq(lock) __local_lock_irq(lock)
+#define local_lock_irq(lock) __local_lock_irq(this_cpu_ptr(lock))
/**
* local_lock_irqsave - Acquire a per CPU local lock, save and disable
@@ -28,19 +28,19 @@
* @flags: Storage for interrupt flags
*/
#define local_lock_irqsave(lock, flags) \
- __local_lock_irqsave(lock, flags)
+ __local_lock_irqsave(this_cpu_ptr(lock), flags)
/**
* local_unlock - Release a per CPU local lock
* @lock: The lock variable
*/
-#define local_unlock(lock) __local_unlock(lock)
+#define local_unlock(lock) __local_unlock(this_cpu_ptr(lock))
/**
* local_unlock_irq - Release a per CPU local lock and enable interrupts
* @lock: The lock variable
*/
-#define local_unlock_irq(lock) __local_unlock_irq(lock)
+#define local_unlock_irq(lock) __local_unlock_irq(this_cpu_ptr(lock))
/**
* local_unlock_irqrestore - Release a per CPU local lock and restore
@@ -49,7 +49,7 @@
* @flags: Interrupt flags to restore
*/
#define local_unlock_irqrestore(lock, flags) \
- __local_unlock_irqrestore(lock, flags)
+ __local_unlock_irqrestore(this_cpu_ptr(lock), flags)
/**
* local_lock_init - Runtime initialize a lock instance
@@ -64,7 +64,7 @@
* locking constrains it will _always_ fail to acquire the lock in NMI or
* HARDIRQ context on PREEMPT_RT.
*/
-#define local_trylock(lock) __local_trylock(lock)
+#define local_trylock(lock) __local_trylock(this_cpu_ptr(lock))
/**
* local_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
@@ -77,7 +77,7 @@
* HARDIRQ context on PREEMPT_RT.
*/
#define local_trylock_irqsave(lock, flags) \
- __local_trylock_irqsave(lock, flags)
+ __local_trylock_irqsave(this_cpu_ptr(lock), flags)
DEFINE_GUARD(local_lock, local_lock_t __percpu*,
local_lock(_T),
@@ -91,10 +91,10 @@ DEFINE_LOCK_GUARD_1(local_lock_irqsave, local_lock_t __percpu,
unsigned long flags)
#define local_lock_nested_bh(_lock) \
- __local_lock_nested_bh(_lock)
+ __local_lock_nested_bh(this_cpu_ptr(_lock))
#define local_unlock_nested_bh(_lock) \
- __local_unlock_nested_bh(_lock)
+ __local_unlock_nested_bh(this_cpu_ptr(_lock))
DEFINE_GUARD(local_lock_nested_bh, local_lock_t __percpu*,
local_lock_nested_bh(_T),
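These wrappers now apply this_cpu_ptr() themselves (the matching __local_* internals lose it in the next hunk), so callers keep passing the per-CPU lock variable unchanged. A usage sketch with invented names:

static DEFINE_PER_CPU(local_lock_t, stats_lock) = INIT_LOCAL_LOCK(stats_lock);
static DEFINE_PER_CPU(unsigned long, stats_count);

static void stats_inc(void)
{
	local_lock(&stats_lock);	/* wrapper resolves this CPU's instance */
	__this_cpu_inc(stats_count);
	local_unlock(&stats_lock);
}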
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index 8d5ac16a9b17..d80b5306a2c0 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -99,14 +99,14 @@ do { \
local_trylock_t *tl; \
local_lock_t *l; \
\
- l = (local_lock_t *)this_cpu_ptr(lock); \
+ l = (local_lock_t *)(lock); \
tl = (local_trylock_t *)l; \
_Generic((lock), \
- __percpu local_trylock_t *: ({ \
+ local_trylock_t *: ({ \
lockdep_assert(tl->acquired == 0); \
WRITE_ONCE(tl->acquired, 1); \
}), \
- __percpu local_lock_t *: (void)0); \
+ local_lock_t *: (void)0); \
local_lock_acquire(l); \
} while (0)
@@ -133,7 +133,7 @@ do { \
local_trylock_t *tl; \
\
preempt_disable(); \
- tl = this_cpu_ptr(lock); \
+ tl = (lock); \
if (READ_ONCE(tl->acquired)) { \
preempt_enable(); \
tl = NULL; \
@@ -150,7 +150,7 @@ do { \
local_trylock_t *tl; \
\
local_irq_save(flags); \
- tl = this_cpu_ptr(lock); \
+ tl = (lock); \
if (READ_ONCE(tl->acquired)) { \
local_irq_restore(flags); \
tl = NULL; \
@@ -167,15 +167,15 @@ do { \
local_trylock_t *tl; \
local_lock_t *l; \
\
- l = (local_lock_t *)this_cpu_ptr(lock); \
+ l = (local_lock_t *)(lock); \
tl = (local_trylock_t *)l; \
local_lock_release(l); \
_Generic((lock), \
- __percpu local_trylock_t *: ({ \
+ local_trylock_t *: ({ \
lockdep_assert(tl->acquired == 1); \
WRITE_ONCE(tl->acquired, 0); \
}), \
- __percpu local_lock_t *: (void)0); \
+ local_lock_t *: (void)0); \
} while (0)
#define __local_unlock(lock) \
@@ -199,11 +199,11 @@ do { \
#define __local_lock_nested_bh(lock) \
do { \
lockdep_assert_in_softirq(); \
- local_lock_acquire(this_cpu_ptr(lock)); \
+ local_lock_acquire((lock)); \
} while (0)
#define __local_unlock_nested_bh(lock) \
- local_lock_release(this_cpu_ptr(lock))
+ local_lock_release((lock))
#else /* !CONFIG_PREEMPT_RT */
@@ -227,7 +227,7 @@ typedef spinlock_t local_trylock_t;
#define __local_lock(__lock) \
do { \
migrate_disable(); \
- spin_lock(this_cpu_ptr((__lock))); \
+ spin_lock((__lock)); \
} while (0)
#define __local_lock_irq(lock) __local_lock(lock)
@@ -241,7 +241,7 @@ typedef spinlock_t local_trylock_t;
#define __local_unlock(__lock) \
do { \
- spin_unlock(this_cpu_ptr((__lock))); \
+ spin_unlock((__lock)); \
migrate_enable(); \
} while (0)
@@ -252,12 +252,12 @@ typedef spinlock_t local_trylock_t;
#define __local_lock_nested_bh(lock) \
do { \
lockdep_assert_in_softirq_func(); \
- spin_lock(this_cpu_ptr(lock)); \
+ spin_lock((lock)); \
} while (0)
#define __local_unlock_nested_bh(lock) \
do { \
- spin_unlock(this_cpu_ptr((lock))); \
+ spin_unlock((lock)); \
} while (0)
#define __local_trylock(lock) \
@@ -268,7 +268,7 @@ do { \
__locked = 0; \
} else { \
migrate_disable(); \
- __locked = spin_trylock(this_cpu_ptr((lock))); \
+ __locked = spin_trylock((lock)); \
if (!__locked) \
migrate_enable(); \
} \
diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h
index 9f361d3ab9d9..eae115a26488 100644
--- a/include/linux/lockdep_types.h
+++ b/include/linux/lockdep_types.h
@@ -175,7 +175,7 @@ struct lock_class_stats {
unsigned long bounces[nr_bounce_types];
};
-struct lock_class_stats lock_stats(struct lock_class *class);
+void lock_stats(struct lock_class *class, struct lock_class_stats *stats);
void clear_lock_stats(struct lock_class *class);
#endif
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index 9ef129038224..bafe143b1f78 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -75,8 +75,8 @@
* searching for gaps or any other code that needs to find the end of the data.
*/
struct maple_metadata {
- unsigned char end;
- unsigned char gap;
+ unsigned char end; /* end of data */
+ unsigned char gap; /* offset of largest gap */
};
/*
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index e43ff9f980a4..c640ba44dd6e 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -95,7 +95,6 @@ void mdio_device_remove(struct mdio_device *mdiodev);
void mdio_device_reset(struct mdio_device *mdiodev, int value);
int mdio_driver_register(struct mdio_driver *drv);
void mdio_driver_unregister(struct mdio_driver *drv);
-int mdio_device_bus_match(struct device *dev, const struct device_driver *drv);
static inline void mdio_device_get(struct mdio_device *mdiodev)
{
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 87b6688f124a..785173aa0739 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -251,8 +251,10 @@ struct mem_cgroup {
* that this indicator should NOT be used in legacy cgroup mode
* where socket memory is accounted/charged separately.
*/
- unsigned long socket_pressure;
-
+ u64 socket_pressure;
+#if BITS_PER_LONG < 64
+ seqlock_t socket_pressure_seqlock;
+#endif
int kmemcg_id;
/*
* memcg->objcg is wiped out as a part of the objcg repaprenting
@@ -1602,6 +1604,42 @@ extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
+
+#if BITS_PER_LONG < 64
+static inline void mem_cgroup_set_socket_pressure(struct mem_cgroup *memcg)
+{
+ u64 val = get_jiffies_64() + HZ;
+ unsigned long flags;
+
+ write_seqlock_irqsave(&memcg->socket_pressure_seqlock, flags);
+ memcg->socket_pressure = val;
+ write_sequnlock_irqrestore(&memcg->socket_pressure_seqlock, flags);
+}
+
+static inline u64 mem_cgroup_get_socket_pressure(struct mem_cgroup *memcg)
+{
+ unsigned int seq;
+ u64 val;
+
+ do {
+ seq = read_seqbegin(&memcg->socket_pressure_seqlock);
+ val = memcg->socket_pressure;
+ } while (read_seqretry(&memcg->socket_pressure_seqlock, seq));
+
+ return val;
+}
+#else
+static inline void mem_cgroup_set_socket_pressure(struct mem_cgroup *memcg)
+{
+ WRITE_ONCE(memcg->socket_pressure, jiffies + HZ);
+}
+
+static inline u64 mem_cgroup_get_socket_pressure(struct mem_cgroup *memcg)
+{
+ return READ_ONCE(memcg->socket_pressure);
+}
+#endif
+
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
#ifdef CONFIG_MEMCG_V1
@@ -1609,7 +1647,7 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
return !!memcg->tcpmem_pressure;
#endif /* CONFIG_MEMCG_V1 */
do {
- if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
+ if (time_before64(get_jiffies_64(), mem_cgroup_get_socket_pressure(memcg)))
return true;
} while ((memcg = parent_mem_cgroup(memcg)));
return false;
diff --git a/include/linux/memfd.h b/include/linux/memfd.h
index 246daadbfde8..6f606d9573c3 100644
--- a/include/linux/memfd.h
+++ b/include/linux/memfd.h
@@ -14,7 +14,7 @@ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx);
* We also update VMA flags if appropriate by manipulating the VMA flags pointed
* to by vm_flags_ptr.
*/
-int memfd_check_seals_mmap(struct file *file, unsigned long *vm_flags_ptr);
+int memfd_check_seals_mmap(struct file *file, vm_flags_t *vm_flags_ptr);
#else
static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned int a)
{
@@ -25,7 +25,7 @@ static inline struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)
return ERR_PTR(-EINVAL);
}
static inline int memfd_check_seals_mmap(struct file *file,
- unsigned long *vm_flags_ptr)
+ vm_flags_t *vm_flags_ptr)
{
return 0;
}
diff --git a/include/linux/memory-tiers.h b/include/linux/memory-tiers.h
index 0dc0cf2863e2..7a805796fcfd 100644
--- a/include/linux/memory-tiers.h
+++ b/include/linux/memory-tiers.h
@@ -18,7 +18,7 @@
* adistance value (slightly faster) than default DRAM adistance to be part of
* the same memory tier.
*/
-#define MEMTIER_ADISTANCE_DRAM ((4 * MEMTIER_CHUNK_SIZE) + (MEMTIER_CHUNK_SIZE >> 1))
+#define MEMTIER_ADISTANCE_DRAM ((4L * MEMTIER_CHUNK_SIZE) + (MEMTIER_CHUNK_SIZE >> 1))
struct memory_tier;
struct memory_dev_type {
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 5ec4e6d209b9..40eb70ccb09d 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -109,8 +109,6 @@ struct memory_notify {
unsigned long altmap_nr_pages;
unsigned long start_pfn;
unsigned long nr_pages;
- int status_change_nid_normal;
- int status_change_nid;
};
struct notifier_block;
@@ -179,12 +177,30 @@ struct memory_group *memory_group_find_by_id(int mgid);
typedef int (*walk_memory_groups_func_t)(struct memory_group *, void *);
int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
struct memory_group *excluded, void *arg);
+struct memory_block *find_memory_block_by_id(unsigned long block_id);
#define hotplug_memory_notifier(fn, pri) ({ \
static __meminitdata struct notifier_block fn##_mem_nb =\
{ .notifier_call = fn, .priority = pri };\
register_memory_notifier(&fn##_mem_nb); \
})
+extern int sections_per_block;
+
+static inline unsigned long memory_block_id(unsigned long section_nr)
+{
+ return section_nr / sections_per_block;
+}
+
+static inline unsigned long pfn_to_block_id(unsigned long pfn)
+{
+ return memory_block_id(pfn_to_section_nr(pfn));
+}
+
+static inline unsigned long phys_to_block_id(unsigned long phys)
+{
+ return pfn_to_block_id(PFN_DOWN(phys));
+}
+
#ifdef CONFIG_NUMA
void memory_block_add_nid(struct memory_block *mem, int nid,
enum meminit_context context);
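The new inline helpers are plain divisions down the pfn -> section -> block chain. A worked sketch (PFN_DOWN() and pfn_to_section_nr() come from existing headers; the wrapper name is illustrative):

static unsigned long example_block_id_of(phys_addr_t phys)
{
	unsigned long pfn = PFN_DOWN(phys);		/* phys >> PAGE_SHIFT */
	unsigned long nr  = pfn_to_section_nr(pfn);	/* pfn >> PFN_SECTION_SHIFT */

	return nr / sections_per_block;			/* == phys_to_block_id(phys) */
}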
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index eaac5ae8c05c..23f038a16231 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -314,7 +314,8 @@ extern int add_memory_driver_managed(int nid, u64 start, u64 size,
mhp_t mhp_flags);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages,
- struct vmem_altmap *altmap, int migratetype);
+ struct vmem_altmap *altmap, int migratetype,
+ bool isolate_pageblock);
extern void remove_pfn_range_from_zone(struct zone *zone,
unsigned long start_pfn,
unsigned long nr_pages);
diff --git a/include/linux/mfd/davinci_voicecodec.h b/include/linux/mfd/davinci_voicecodec.h
index 556375b91316..9acd703dd5ca 100644
--- a/include/linux/mfd/davinci_voicecodec.h
+++ b/include/linux/mfd/davinci_voicecodec.h
@@ -10,11 +10,13 @@
#ifndef __LINUX_MFD_DAVINCI_VOICECODEC_H_
#define __LINUX_MFD_DAVINCI_VOICECODEC_H_
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
+#include <linux/bits.h>
#include <linux/mfd/core.h>
-#include <linux/platform_data/edma.h>
+#include <linux/types.h>
+struct clk;
+struct device;
+struct platform_device;
struct regmap;
/*
diff --git a/include/linux/mfd/madera/pdata.h b/include/linux/mfd/madera/pdata.h
index 32e3470708ed..7e84738cbb20 100644
--- a/include/linux/mfd/madera/pdata.h
+++ b/include/linux/mfd/madera/pdata.h
@@ -8,10 +8,11 @@
#ifndef MADERA_PDATA_H
#define MADERA_PDATA_H
-#include <linux/kernel.h>
#include <linux/regulator/arizona-ldo1.h>
#include <linux/regulator/arizona-micsupp.h>
#include <linux/regulator/machine.h>
+#include <linux/types.h>
+
#include <sound/madera-pdata.h>
#define MADERA_MAX_MICBIAS 4
diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h
deleted file mode 100644
index 42d2b0e4884e..000000000000
--- a/include/linux/mfd/pcf50633/core.h
+++ /dev/null
@@ -1,229 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * core.h -- Core driver for NXP PCF50633
- *
- * (C) 2006-2008 by Openmoko, Inc.
- * All rights reserved.
- */
-
-#ifndef __LINUX_MFD_PCF50633_CORE_H
-#define __LINUX_MFD_PCF50633_CORE_H
-
-#include <linux/i2c.h>
-#include <linux/workqueue.h>
-#include <linux/regulator/driver.h>
-#include <linux/regulator/machine.h>
-#include <linux/pm.h>
-#include <linux/power_supply.h>
-
-struct pcf50633;
-struct regmap;
-
-#define PCF50633_NUM_REGULATORS 11
-
-struct pcf50633_platform_data {
- struct regulator_init_data reg_init_data[PCF50633_NUM_REGULATORS];
-
- char **batteries;
- int num_batteries;
-
- /*
- * Should be set accordingly to the reference resistor used, see
- * I_{ch(ref)} charger reference current in the pcf50633 User
- * Manual.
- */
- int charger_reference_current_ma;
-
- /* Callbacks */
- void (*probe_done)(struct pcf50633 *);
- void (*mbc_event_callback)(struct pcf50633 *, int);
- void (*regulator_registered)(struct pcf50633 *, int);
- void (*force_shutdown)(struct pcf50633 *);
-
- u8 resumers[5];
-};
-
-struct pcf50633_irq {
- void (*handler) (int, void *);
- void *data;
-};
-
-int pcf50633_register_irq(struct pcf50633 *pcf, int irq,
- void (*handler) (int, void *), void *data);
-int pcf50633_free_irq(struct pcf50633 *pcf, int irq);
-
-int pcf50633_irq_mask(struct pcf50633 *pcf, int irq);
-int pcf50633_irq_unmask(struct pcf50633 *pcf, int irq);
-int pcf50633_irq_mask_get(struct pcf50633 *pcf, int irq);
-
-int pcf50633_read_block(struct pcf50633 *, u8 reg,
- int nr_regs, u8 *data);
-int pcf50633_write_block(struct pcf50633 *pcf, u8 reg,
- int nr_regs, u8 *data);
-u8 pcf50633_reg_read(struct pcf50633 *, u8 reg);
-int pcf50633_reg_write(struct pcf50633 *pcf, u8 reg, u8 val);
-
-int pcf50633_reg_set_bit_mask(struct pcf50633 *pcf, u8 reg, u8 mask, u8 val);
-int pcf50633_reg_clear_bits(struct pcf50633 *pcf, u8 reg, u8 bits);
-
-/* Interrupt registers */
-
-#define PCF50633_REG_INT1 0x02
-#define PCF50633_REG_INT2 0x03
-#define PCF50633_REG_INT3 0x04
-#define PCF50633_REG_INT4 0x05
-#define PCF50633_REG_INT5 0x06
-
-#define PCF50633_REG_INT1M 0x07
-#define PCF50633_REG_INT2M 0x08
-#define PCF50633_REG_INT3M 0x09
-#define PCF50633_REG_INT4M 0x0a
-#define PCF50633_REG_INT5M 0x0b
-
-enum {
- /* Chip IRQs */
- PCF50633_IRQ_ADPINS,
- PCF50633_IRQ_ADPREM,
- PCF50633_IRQ_USBINS,
- PCF50633_IRQ_USBREM,
- PCF50633_IRQ_RESERVED1,
- PCF50633_IRQ_RESERVED2,
- PCF50633_IRQ_ALARM,
- PCF50633_IRQ_SECOND,
- PCF50633_IRQ_ONKEYR,
- PCF50633_IRQ_ONKEYF,
- PCF50633_IRQ_EXTON1R,
- PCF50633_IRQ_EXTON1F,
- PCF50633_IRQ_EXTON2R,
- PCF50633_IRQ_EXTON2F,
- PCF50633_IRQ_EXTON3R,
- PCF50633_IRQ_EXTON3F,
- PCF50633_IRQ_BATFULL,
- PCF50633_IRQ_CHGHALT,
- PCF50633_IRQ_THLIMON,
- PCF50633_IRQ_THLIMOFF,
- PCF50633_IRQ_USBLIMON,
- PCF50633_IRQ_USBLIMOFF,
- PCF50633_IRQ_ADCRDY,
- PCF50633_IRQ_ONKEY1S,
- PCF50633_IRQ_LOWSYS,
- PCF50633_IRQ_LOWBAT,
- PCF50633_IRQ_HIGHTMP,
- PCF50633_IRQ_AUTOPWRFAIL,
- PCF50633_IRQ_DWN1PWRFAIL,
- PCF50633_IRQ_DWN2PWRFAIL,
- PCF50633_IRQ_LEDPWRFAIL,
- PCF50633_IRQ_LEDOVP,
- PCF50633_IRQ_LDO1PWRFAIL,
- PCF50633_IRQ_LDO2PWRFAIL,
- PCF50633_IRQ_LDO3PWRFAIL,
- PCF50633_IRQ_LDO4PWRFAIL,
- PCF50633_IRQ_LDO5PWRFAIL,
- PCF50633_IRQ_LDO6PWRFAIL,
- PCF50633_IRQ_HCLDOPWRFAIL,
- PCF50633_IRQ_HCLDOOVL,
-
- /* Always last */
- PCF50633_NUM_IRQ,
-};
-
-struct pcf50633 {
- struct device *dev;
- struct regmap *regmap;
-
- struct pcf50633_platform_data *pdata;
- int irq;
- struct pcf50633_irq irq_handler[PCF50633_NUM_IRQ];
- struct work_struct irq_work;
- struct workqueue_struct *work_queue;
- struct mutex lock;
-
- u8 mask_regs[5];
-
- u8 suspend_irq_masks[5];
- u8 resume_reason[5];
- int is_suspended;
-
- int onkey1s_held;
-
- struct platform_device *rtc_pdev;
- struct platform_device *mbc_pdev;
- struct platform_device *adc_pdev;
- struct platform_device *input_pdev;
- struct platform_device *bl_pdev;
- struct platform_device *regulator_pdev[PCF50633_NUM_REGULATORS];
-};
-
-enum pcf50633_reg_int1 {
- PCF50633_INT1_ADPINS = 0x01, /* Adapter inserted */
- PCF50633_INT1_ADPREM = 0x02, /* Adapter removed */
- PCF50633_INT1_USBINS = 0x04, /* USB inserted */
- PCF50633_INT1_USBREM = 0x08, /* USB removed */
- /* reserved */
- PCF50633_INT1_ALARM = 0x40, /* RTC alarm time is reached */
- PCF50633_INT1_SECOND = 0x80, /* RTC periodic second interrupt */
-};
-
-enum pcf50633_reg_int2 {
- PCF50633_INT2_ONKEYR = 0x01, /* ONKEY rising edge */
- PCF50633_INT2_ONKEYF = 0x02, /* ONKEY falling edge */
- PCF50633_INT2_EXTON1R = 0x04, /* EXTON1 rising edge */
- PCF50633_INT2_EXTON1F = 0x08, /* EXTON1 falling edge */
- PCF50633_INT2_EXTON2R = 0x10, /* EXTON2 rising edge */
- PCF50633_INT2_EXTON2F = 0x20, /* EXTON2 falling edge */
- PCF50633_INT2_EXTON3R = 0x40, /* EXTON3 rising edge */
- PCF50633_INT2_EXTON3F = 0x80, /* EXTON3 falling edge */
-};
-
-enum pcf50633_reg_int3 {
- PCF50633_INT3_BATFULL = 0x01, /* Battery full */
- PCF50633_INT3_CHGHALT = 0x02, /* Charger halt */
- PCF50633_INT3_THLIMON = 0x04,
- PCF50633_INT3_THLIMOFF = 0x08,
- PCF50633_INT3_USBLIMON = 0x10,
- PCF50633_INT3_USBLIMOFF = 0x20,
- PCF50633_INT3_ADCRDY = 0x40, /* ADC result ready */
- PCF50633_INT3_ONKEY1S = 0x80, /* ONKEY pressed 1 second */
-};
-
-enum pcf50633_reg_int4 {
- PCF50633_INT4_LOWSYS = 0x01,
- PCF50633_INT4_LOWBAT = 0x02,
- PCF50633_INT4_HIGHTMP = 0x04,
- PCF50633_INT4_AUTOPWRFAIL = 0x08,
- PCF50633_INT4_DWN1PWRFAIL = 0x10,
- PCF50633_INT4_DWN2PWRFAIL = 0x20,
- PCF50633_INT4_LEDPWRFAIL = 0x40,
- PCF50633_INT4_LEDOVP = 0x80,
-};
-
-enum pcf50633_reg_int5 {
- PCF50633_INT5_LDO1PWRFAIL = 0x01,
- PCF50633_INT5_LDO2PWRFAIL = 0x02,
- PCF50633_INT5_LDO3PWRFAIL = 0x04,
- PCF50633_INT5_LDO4PWRFAIL = 0x08,
- PCF50633_INT5_LDO5PWRFAIL = 0x10,
- PCF50633_INT5_LDO6PWRFAIL = 0x20,
- PCF50633_INT5_HCLDOPWRFAIL = 0x40,
- PCF50633_INT5_HCLDOOVL = 0x80,
-};
-
-/* misc. registers */
-#define PCF50633_REG_OOCSHDWN 0x0c
-
-/* LED registers */
-#define PCF50633_REG_LEDOUT 0x28
-#define PCF50633_REG_LEDENA 0x29
-#define PCF50633_REG_LEDCTL 0x2a
-#define PCF50633_REG_LEDDIM 0x2b
-
-static inline struct pcf50633 *dev_to_pcf50633(struct device *dev)
-{
- return dev_get_drvdata(dev);
-}
-
-int pcf50633_irq_init(struct pcf50633 *pcf, int irq);
-void pcf50633_irq_free(struct pcf50633 *pcf);
-extern const struct dev_pm_ops pcf50633_pm;
-
-#endif
diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h
index 69cbea78b430..28170ee08898 100644
--- a/include/linux/mfd/rk808.h
+++ b/include/linux/mfd/rk808.h
@@ -812,6 +812,8 @@ enum rk806_pin_dr_sel {
#define RK806_INT_POL_H BIT(1)
#define RK806_INT_POL_L 0
+/* SYS_CFG3 */
+#define RK806_RST_FUN_MSK GENMASK(7, 6)
#define RK806_SLAVE_RESTART_FUN_MSK BIT(1)
#define RK806_SLAVE_RESTART_FUN_EN BIT(1)
#define RK806_SLAVE_RESTART_FUN_OFF 0
diff --git a/include/linux/mfd/syscon/atmel-smc.h b/include/linux/mfd/syscon/atmel-smc.h
index e9e24f4c4578..9b9119c742a2 100644
--- a/include/linux/mfd/syscon/atmel-smc.h
+++ b/include/linux/mfd/syscon/atmel-smc.h
@@ -11,9 +11,11 @@
#ifndef _LINUX_MFD_SYSCON_ATMEL_SMC_H_
#define _LINUX_MFD_SYSCON_ATMEL_SMC_H_
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/regmap.h>
+#include <linux/bits.h>
+#include <linux/types.h>
+
+struct device_node;
+struct regmap;
#define ATMEL_SMC_SETUP(cs) (((cs) * 0x10))
#define ATMEL_HSMC_SETUP(layout, cs) \
diff --git a/include/linux/mfd/tps65219.h b/include/linux/mfd/tps65219.h
index 3e8d29189267..55234e771ba7 100644
--- a/include/linux/mfd/tps65219.h
+++ b/include/linux/mfd/tps65219.h
@@ -10,7 +10,6 @@
#define MFD_TPS65219_H
#include <linux/bitops.h>
-#include <linux/notifier.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
@@ -438,17 +437,13 @@ enum tps65219_irqs {
*
* @dev: MFD device
* @regmap: Regmap for accessing the device registers
- * @chip_id: Chip ID
* @irq_data: Regmap irq data used for the irq chip
- * @nb: notifier block for the restart handler
*/
struct tps65219 {
struct device *dev;
struct regmap *regmap;
- unsigned int chip_id;
struct regmap_irq_chip_data *irq_data;
- struct notifier_block nb;
};
#endif /* MFD_TPS65219_H */
diff --git a/include/linux/mfd/twl.h b/include/linux/mfd/twl.h
index 85dc406173db..b31e07fa4d51 100644
--- a/include/linux/mfd/twl.h
+++ b/include/linux/mfd/twl.h
@@ -205,27 +205,6 @@ int twl_get_hfclk_rate(void);
int twl6030_interrupt_unmask(u8 bit_mask, u8 offset);
int twl6030_interrupt_mask(u8 bit_mask, u8 offset);
-/* Card detect Configuration for MMC1 Controller on OMAP4 */
-#ifdef CONFIG_TWL4030_CORE
-int twl6030_mmc_card_detect_config(void);
-#else
-static inline int twl6030_mmc_card_detect_config(void)
-{
- pr_debug("twl6030_mmc_card_detect_config not supported\n");
- return 0;
-}
-#endif
-
-/* MMC1 Controller on OMAP4 uses Phoenix irq for Card detect */
-#ifdef CONFIG_TWL4030_CORE
-int twl6030_mmc_card_detect(struct device *dev, int slot);
-#else
-static inline int twl6030_mmc_card_detect(struct device *dev, int slot)
-{
- pr_debug("Call back twl6030_mmc_card_detect not supported\n");
- return -EIO;
-}
-#endif
/*----------------------------------------------------------------------*/
/*
diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h
index a3241e4d7548..5f70d3b5d1b1 100644
--- a/include/linux/mfd/wm8350/core.h
+++ b/include/linux/mfd/wm8350/core.h
@@ -8,11 +8,12 @@
#ifndef __LINUX_MFD_WM8350_CORE_H_
#define __LINUX_MFD_WM8350_CORE_H_
-#include <linux/kernel.h>
-#include <linux/mutex.h>
-#include <linux/interrupt.h>
#include <linux/completion.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
#include <linux/regmap.h>
+#include <linux/types.h>
#include <linux/mfd/wm8350/audio.h>
#include <linux/mfd/wm8350/gpio.h>
@@ -21,6 +22,9 @@
#include <linux/mfd/wm8350/supply.h>
#include <linux/mfd/wm8350/wdt.h>
+struct device;
+struct platform_device;
+
/*
* Register values.
*/
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index aaa2114498d6..acadd41e0b5c 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -35,8 +35,8 @@ struct migration_target_control;
* @src page. The driver should copy the contents of the
* @src page to the @dst page and set up the fields of @dst page.
* Both pages are locked.
- * If page migration is successful, the driver should call
- * __ClearPageMovable(@src) and return MIGRATEPAGE_SUCCESS.
+ * If page migration is successful, the driver should
+ * return MIGRATEPAGE_SUCCESS.
* If the driver cannot migrate the page at the moment, it can return
* -EAGAIN. The VM interprets this as a temporary migration failure and
* will retry it later. Any other error value is a permanent migration
@@ -69,7 +69,7 @@ int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
unsigned long private, enum migrate_mode mode, int reason,
unsigned int *ret_succeeded);
struct folio *alloc_migration_target(struct folio *src, unsigned long private);
-bool isolate_movable_page(struct page *page, isolate_mode_t mode);
+bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode);
bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
int migrate_huge_page_move_mapping(struct address_space *mapping,
@@ -90,7 +90,7 @@ static inline int migrate_pages(struct list_head *l, new_folio_t new,
static inline struct folio *alloc_migration_target(struct folio *src,
unsigned long private)
{ return NULL; }
-static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
+static inline bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
{ return false; }
static inline bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
{ return false; }
@@ -103,44 +103,6 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
#endif /* CONFIG_MIGRATION */
-#ifdef CONFIG_COMPACTION
-bool PageMovable(struct page *page);
-void __SetPageMovable(struct page *page, const struct movable_operations *ops);
-void __ClearPageMovable(struct page *page);
-#else
-static inline bool PageMovable(struct page *page) { return false; }
-static inline void __SetPageMovable(struct page *page,
- const struct movable_operations *ops)
-{
-}
-static inline void __ClearPageMovable(struct page *page)
-{
-}
-#endif
-
-static inline bool folio_test_movable(struct folio *folio)
-{
- return PageMovable(&folio->page);
-}
-
-static inline
-const struct movable_operations *folio_movable_ops(struct folio *folio)
-{
- VM_BUG_ON(!__folio_test_movable(folio));
-
- return (const struct movable_operations *)
- ((unsigned long)folio->mapping - PAGE_MAPPING_MOVABLE);
-}
-
-static inline
-const struct movable_operations *page_movable_ops(struct page *page)
-{
- VM_BUG_ON(!__PageMovable(page));
-
- return (const struct movable_operations *)
- ((unsigned long)page->mapping - PAGE_MAPPING_MOVABLE);
-}
-
#ifdef CONFIG_NUMA_BALANCING
int migrate_misplaced_folio_prepare(struct folio *folio,
struct vm_area_struct *vma, int node);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 6822cfa5f4ad..9d2467f982ad 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -280,6 +280,7 @@ enum {
MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23,
MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE = 1ull << 25,
MLX5_MKEY_MASK_FREE = 1ull << 29,
+ MLX5_MKEY_MASK_PAGE_SIZE_5 = 1ull << 42,
MLX5_MKEY_MASK_RELAXED_ORDERING_READ = 1ull << 47,
};
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index e6ba8f4f4bd1..8c5fbfb85749 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -36,6 +36,7 @@
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
+#include <linux/pci-tph.h>
#include <linux/irq.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
@@ -688,6 +689,7 @@ struct mlx5_fw_tracer;
struct mlx5_vxlan;
struct mlx5_geneve;
struct mlx5_hv_vhca;
+struct mlx5_st;
#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
@@ -757,6 +759,7 @@ struct mlx5_core_dev {
u32 issi;
struct mlx5e_resources mlx5e_res;
struct mlx5_dm *dm;
+ struct mlx5_st *st;
struct mlx5_vxlan *vxlan;
struct mlx5_geneve *geneve;
struct {
@@ -1160,6 +1163,23 @@ int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
u64 length, u16 uid, phys_addr_t addr, u32 obj_id);
+#ifdef CONFIG_PCIE_TPH
+int mlx5_st_alloc_index(struct mlx5_core_dev *dev, enum tph_mem_type mem_type,
+ unsigned int cpu_uid, u16 *st_index);
+int mlx5_st_dealloc_index(struct mlx5_core_dev *dev, u16 st_index);
+#else
+static inline int mlx5_st_alloc_index(struct mlx5_core_dev *dev,
+ enum tph_mem_type mem_type,
+ unsigned int cpu_uid, u16 *st_index)
+{
+ return -EOPNOTSUPP;
+}
+static inline int mlx5_st_dealloc_index(struct mlx5_core_dev *dev, u16 st_index)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
struct mlx5_core_dev *mlx5_vf_get_core_dev(struct pci_dev *pdev);
void mlx5_vf_put_core_dev(struct mlx5_core_dev *mdev);
@@ -1349,4 +1369,9 @@ enum {
};
bool mlx5_wc_support_get(struct mlx5_core_dev *mdev);
+
+static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
+{
+ return devlink_net(priv_to_devlink(dev));
+}
#endif /* MLX5_DRIVER_H */
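The CONFIG_PCIE_TPH stubs above return -EOPNOTSUPP, so a caller can request a steering tag unconditionally and fall back to the default tag when TPH support is compiled out. A minimal sketch, assuming the caller name and fallback policy (neither is part of this patch):

	static int example_get_steering_tag(struct mlx5_core_dev *mdev,
					    enum tph_mem_type mem_type,
					    unsigned int cpu_uid, u16 *st_index)
	{
		int err = mlx5_st_alloc_index(mdev, mem_type, cpu_uid, st_index);

		if (err == -EOPNOTSUPP) {
			/* TPH disabled or unsupported: fall back to "no tag". */
			*st_index = MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX;
			return 0;
		}
		return err;
	}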
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 939e58c2f386..86055d55836d 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -40,7 +40,7 @@
#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
-#define MLX5_RDMA_TRANSPORT_BYPASS_PRIO 0
+#define MLX5_RDMA_TRANSPORT_BYPASS_PRIO 16
#define MLX5_FS_MAX_POOL_SIZE BIT(30)
enum mlx5_flow_destination_type {
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 2c09df4ee574..8360d9011d4f 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -420,7 +420,8 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
/* Table 2170 - Flow Table Fields Supported 2 Format */
struct mlx5_ifc_flow_table_fields_supported_2_bits {
- u8 reserved_at_0[0x2];
+ u8 inner_l4_type_ext[0x1];
+ u8 outer_l4_type_ext[0x1];
u8 inner_l4_type[0x1];
u8 outer_l4_type[0x1];
u8 reserved_at_4[0xa];
@@ -429,7 +430,11 @@ struct mlx5_ifc_flow_table_fields_supported_2_bits {
u8 tunnel_header_0_1[0x1];
u8 reserved_at_11[0xf];
- u8 reserved_at_20[0x60];
+ u8 reserved_at_20[0xf];
+ u8 ipsec_next_header[0x1];
+ u8 reserved_at_30[0x10];
+
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_flow_table_prop_layout_bits {
@@ -552,6 +557,13 @@ enum {
MLX5_PACKET_L4_TYPE_UDP,
};
+enum {
+ MLX5_PACKET_L4_TYPE_EXT_NONE,
+ MLX5_PACKET_L4_TYPE_EXT_TCP,
+ MLX5_PACKET_L4_TYPE_EXT_UDP,
+ MLX5_PACKET_L4_TYPE_EXT_ICMP,
+};
+
struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 smac_47_16[0x20];
@@ -578,10 +590,10 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 tcp_dport[0x10];
u8 l4_type[0x2];
- u8 reserved_at_c2[0xe];
+ u8 l4_type_ext[0x4];
+ u8 reserved_at_c6[0xa];
u8 ipv4_ihl[0x4];
- u8 reserved_at_c4[0x4];
-
+ u8 reserved_at_d4[0x4];
u8 ttl_hoplimit[0x8];
u8 udp_sport[0x10];
@@ -689,10 +701,9 @@ struct mlx5_ifc_fte_match_set_misc2_bits {
u8 metadata_reg_a[0x20];
u8 reserved_at_1a0[0x8];
-
u8 macsec_syndrome[0x8];
u8 ipsec_syndrome[0x8];
- u8 reserved_at_1b8[0x8];
+ u8 ipsec_next_header[0x8];
u8 reserved_at_1c0[0x40];
};
@@ -1846,7 +1857,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_bf_reg_size[0x5];
- u8 reserved_at_270[0x3];
+ u8 disciplined_fr_counter[0x1];
+ u8 reserved_at_271[0x2];
u8 qp_error_syndrome[0x1];
u8 reserved_at_274[0x2];
u8 lag_dct[0x2];
@@ -1859,7 +1871,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_280[0x10];
u8 max_wqe_sz_sq[0x10];
- u8 reserved_at_2a0[0xb];
+ u8 reserved_at_2a0[0x7];
+ u8 mkey_pcie_tph[0x1];
+ u8 reserved_at_2a8[0x3];
u8 shampo[0x1];
u8 reserved_at_2ac[0x4];
u8 max_wqe_sz_rq[0x10];
@@ -2171,7 +2185,9 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 min_mkey_log_entity_size_fixed_buffer[0x5];
u8 ec_vf_vport_base[0x10];
- u8 reserved_at_3a0[0xa];
+ u8 reserved_at_3a0[0x2];
+ u8 max_mkey_log_entity_size_fixed_buffer[0x6];
+ u8 reserved_at_3a8[0x2];
u8 max_mkey_log_entity_size_mtt[0x6];
u8 max_rqt_vhca_id[0x10];
@@ -4404,6 +4420,10 @@ enum {
MLX5_MKC_ACCESS_MODE_CROSSING = 0x6,
};
+enum {
+ MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX = 0,
+};
+
struct mlx5_ifc_mkc_bits {
u8 reserved_at_0[0x1];
u8 free[0x1];
@@ -4455,7 +4475,11 @@ struct mlx5_ifc_mkc_bits {
u8 relaxed_ordering_read[0x1];
u8 log_page_size[0x6];
- u8 reserved_at_1e0[0x20];
+ u8 reserved_at_1e0[0x5];
+ u8 pcie_tph_en[0x1];
+ u8 pcie_tph_ph[0x2];
+ u8 pcie_tph_steering_tag_index[0x8];
+ u8 reserved_at_1f0[0x10];
};
struct mlx5_ifc_pkey_bits {
@@ -9980,6 +10004,10 @@ struct mlx5_ifc_pude_reg_bits {
u8 reserved_at_20[0x60];
};
+enum {
+ MLX5_PTYS_CONNECTOR_TYPE_PORT_DA = 0x7,
+};
+
struct mlx5_ifc_ptys_reg_bits {
u8 reserved_at_0[0x1];
u8 an_disable_admin[0x1];
@@ -10016,7 +10044,8 @@ struct mlx5_ifc_ptys_reg_bits {
u8 ib_link_width_oper[0x10];
u8 ib_proto_oper[0x10];
- u8 reserved_at_160[0x1c];
+ u8 reserved_at_160[0x8];
+ u8 lane_rate_oper[0x14];
u8 connector_type[0x4];
u8 eth_proto_lp_advertise[0x20];
@@ -10460,10 +10489,19 @@ struct mlx5_ifc_pifr_reg_bits {
u8 port_filter_update_en[8][0x20];
};
+enum {
+ MLX5_BUF_OWNERSHIP_UNKNOWN = 0x0,
+ MLX5_BUF_OWNERSHIP_FW_OWNED = 0x1,
+ MLX5_BUF_OWNERSHIP_SW_OWNED = 0x2,
+};
+
struct mlx5_ifc_pfcc_reg_bits {
- u8 reserved_at_0[0x8];
+ u8 reserved_at_0[0x4];
+ u8 buf_ownership[0x2];
+ u8 reserved_at_6[0x2];
u8 local_port[0x8];
- u8 reserved_at_10[0xb];
+ u8 reserved_at_10[0xa];
+ u8 cable_length_mask[0x1];
u8 ppan_mask_n[0x1];
u8 minor_stall_mask[0x1];
u8 critical_stall_mask[0x1];
@@ -10492,7 +10530,10 @@ struct mlx5_ifc_pfcc_reg_bits {
u8 device_stall_minor_watermark[0x10];
u8 device_stall_critical_watermark[0x10];
- u8 reserved_at_a0[0x60];
+ u8 reserved_at_a0[0x18];
+ u8 cable_length[0x8];
+
+ u8 reserved_at_c0[0x40];
};
struct mlx5_ifc_pelc_reg_bits {
@@ -10593,11 +10634,15 @@ struct mlx5_ifc_mtutc_reg_bits {
struct mlx5_ifc_pcam_enhanced_features_bits {
u8 reserved_at_0[0x10];
u8 ppcnt_recovery_counters[0x1];
- u8 reserved_at_11[0xc];
+ u8 reserved_at_11[0x7];
+ u8 cable_length[0x1];
+ u8 reserved_at_19[0x4];
u8 fec_200G_per_lane_in_pplm[0x1];
u8 reserved_at_1e[0x2a];
u8 fec_100G_per_lane_in_pplm[0x1];
- u8 reserved_at_49[0x1f];
+ u8 reserved_at_49[0xa];
+ u8 buffer_ownership[0x1];
+ u8 reserved_at_54[0x14];
u8 fec_50G_per_lane_in_pplm[0x1];
u8 reserved_at_69[0x4];
u8 rx_icrc_encapsulated_counter[0x1];
@@ -12380,7 +12425,9 @@ struct mlx5_ifc_mtrc_ctrl_bits {
struct mlx5_ifc_host_params_context_bits {
u8 host_number[0x8];
- u8 reserved_at_8[0x7];
+ u8 reserved_at_8[0x5];
+ u8 host_pf_not_exist[0x1];
+ u8 reserved_at_14[0x1];
u8 host_pf_disabled[0x1];
u8 host_num_of_vfs[0x10];
@@ -12502,17 +12549,6 @@ struct mlx5_ifc_affiliated_event_header_bits {
};
enum {
- MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = BIT_ULL(0xc),
- MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC = BIT_ULL(0x13),
- MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER = BIT_ULL(0x20),
- MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = BIT_ULL(0x24),
-};
-
-enum {
- MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_RDMA_CTRL = BIT_ULL(0x13),
-};
-
-enum {
MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc,
MLX5_GENERAL_OBJECT_TYPES_IPSEC = 0x13,
MLX5_GENERAL_OBJECT_TYPES_SAMPLER = 0x20,
@@ -12520,10 +12556,29 @@ enum {
MLX5_GENERAL_OBJECT_TYPES_MACSEC = 0x27,
MLX5_GENERAL_OBJECT_TYPES_INT_KEK = 0x47,
MLX5_GENERAL_OBJECT_TYPES_RDMA_CTRL = 0x53,
+ MLX5_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT = 0x58,
MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS = 0xff15,
};
enum {
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY =
+ BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY),
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC =
+ BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_IPSEC),
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER =
+ BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_SAMPLER),
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_FLOW_METER_ASO =
+ BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO),
+};
+
+enum {
+ MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_RDMA_CTRL =
+ BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_RDMA_CTRL - 0x40),
+ MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT =
+ BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT - 0x40),
+};
+
+enum {
MLX5_IPSEC_OBJECT_ICV_LEN_16B,
};
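The reworked enums derive each HCA capability bit from the object type number itself; types at or above 0x40 land in the second 64-bit capability word, which is why the CAP_2 entries subtract 0x40 (0x53 - 0x40 = 0x13 for RDMA_CTRL, 0x58 - 0x40 = 0x18 for PCIE_CONG_EVENT). A hedged sketch of the resulting check for the first word, assuming the usual MLX5_CAP_GEN_64() accessor:

	/* Object types below 0x40 are reported in the first capability word. */
	bool has_sampler = MLX5_CAP_GEN_64(mdev, general_obj_types) &
			   MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER;

	/*
	 * Types >= 0x40 are reported in the second word, hence the "- 0x40":
	 * e.g. BIT_ULL(0x58 - 0x40) == BIT_ULL(0x18) for PCIE_CONG_EVENT.
	 */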
@@ -13279,4 +13334,41 @@ struct mlx5_ifc_mrtcq_reg_bits {
u8 reserved_at_80[0x180];
};
+struct mlx5_ifc_pcie_cong_event_obj_bits {
+ u8 modify_select_field[0x40];
+
+ u8 inbound_event_en[0x1];
+ u8 outbound_event_en[0x1];
+ u8 reserved_at_42[0x1e];
+
+ u8 reserved_at_60[0x1];
+ u8 inbound_cong_state[0x3];
+ u8 reserved_at_64[0x1];
+ u8 outbound_cong_state[0x3];
+ u8 reserved_at_68[0x18];
+
+ u8 inbound_cong_low_threshold[0x10];
+ u8 inbound_cong_high_threshold[0x10];
+
+ u8 outbound_cong_low_threshold[0x10];
+ u8 outbound_cong_high_threshold[0x10];
+
+ u8 reserved_at_e0[0x340];
+};
+
+struct mlx5_ifc_pcie_cong_event_cmd_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_pcie_cong_event_obj_bits cong_obj;
+};
+
+struct mlx5_ifc_pcie_cong_event_cmd_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits hdr;
+ struct mlx5_ifc_pcie_cong_event_obj_bits cong_obj;
+};
+
+enum mlx5e_pcie_cong_event_mod_field {
+ MLX5_PCIE_CONG_EVENT_MOD_EVENT_EN = BIT(0),
+ MLX5_PCIE_CONG_EVENT_MOD_THRESH = BIT(2),
+};
+
#endif /* MLX5_IFC_H */
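A minimal sketch of programming the new PCIe congestion event object with the standard MLX5_SET()/MLX5_SET64() helpers; the command buffer and the low/high threshold variables are assumptions, not part of the patch:

	u8 in[MLX5_ST_SZ_BYTES(pcie_cong_event_cmd_in)] = {};
	void *obj = MLX5_ADDR_OF(pcie_cong_event_cmd_in, in, cong_obj);

	MLX5_SET64(pcie_cong_event_obj, obj, modify_select_field,
		   MLX5_PCIE_CONG_EVENT_MOD_THRESH);
	MLX5_SET(pcie_cong_event_obj, obj, inbound_cong_low_threshold, low);
	MLX5_SET(pcie_cong_event_obj, obj, inbound_cong_high_threshold, high);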
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 30138219940e..349f0d9aad22 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1325,6 +1325,8 @@ static inline void get_page(struct page *page)
struct folio *folio = page_folio(page);
if (WARN_ON_ONCE(folio_test_slab(folio)))
return;
+ if (WARN_ON_ONCE(folio_test_large_kmalloc(folio)))
+ return;
folio_get(folio);
}
@@ -1419,7 +1421,7 @@ static inline void put_page(struct page *page)
{
struct folio *folio = page_folio(page);
- if (folio_test_slab(folio))
+ if (folio_test_slab(folio) || folio_test_large_kmalloc(folio))
return;
folio_put(folio);
@@ -1816,7 +1818,24 @@ static inline pmd_t folio_mk_pmd(struct folio *folio, pgprot_t pgprot)
{
return pmd_mkhuge(pfn_pmd(folio_pfn(folio), pgprot));
}
-#endif
+
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+/**
+ * folio_mk_pud - Create a PUD for this folio
+ * @folio: The folio to create a PUD for
+ * @pgprot: The page protection bits to use
+ *
+ * Create a page table entry for the first page of this folio.
+ * This is suitable for passing to set_pud_at().
+ *
+ * Return: A page table entry suitable for mapping this folio.
+ */
+static inline pud_t folio_mk_pud(struct folio *folio, pgprot_t pgprot)
+{
+ return pud_mkhuge(pfn_pud(folio_pfn(folio), pgprot));
+}
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* CONFIG_MMU */
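Following the existing folio_mk_pmd() pattern, a hypothetical PUD-sized mapping path would pair the new helper with set_pud_at(); the pudp, addr and fault context below are illustrative only:

	pud_t entry = folio_mk_pud(folio, vma->vm_page_prot);

	set_pud_at(vma->vm_mm, addr, pudp, entry);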
static inline bool folio_has_pincount(const struct folio *folio)
@@ -2150,13 +2169,13 @@ static inline int folio_expected_ref_count(const struct folio *folio)
const int order = folio_order(folio);
int ref_count = 0;
- if (WARN_ON_ONCE(folio_test_slab(folio)))
+ if (WARN_ON_ONCE(page_has_type(&folio->page) && !folio_test_hugetlb(folio)))
return 0;
if (folio_test_anon(folio)) {
/* One reference per page from the swapcache. */
ref_count += folio_test_swapcache(folio) << order;
- } else if (!((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS)) {
+ } else {
/* One reference per page from the pagecache. */
ref_count += !!folio->mapping << order;
/* One reference from PG_private. */
@@ -2547,7 +2566,7 @@ extern long change_protection(struct mmu_gather *tlb,
unsigned long end, unsigned long cp_flags);
extern int mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
struct vm_area_struct *vma, struct vm_area_struct **pprev,
- unsigned long start, unsigned long end, unsigned long newflags);
+ unsigned long start, unsigned long end, vm_flags_t newflags);
/*
* doesn't attempt to fault and will return short.
@@ -2692,13 +2711,6 @@ static inline pud_t pud_mkspecial(pud_t pud)
}
#endif /* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */
-#ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
-static inline int pte_devmap(pte_t pte)
-{
- return 0;
-}
-#endif
-
extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
@@ -3311,9 +3323,9 @@ extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
const struct vm_special_mapping *sm);
-extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
+struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
unsigned long addr, unsigned long len,
- unsigned long flags,
+ vm_flags_t vm_flags,
const struct vm_special_mapping *spec);
unsigned long randomize_stack_top(unsigned long stack_top);
@@ -3477,10 +3489,10 @@ static inline bool range_in_vma(struct vm_area_struct *vma,
}
#ifdef CONFIG_MMU
-pgprot_t vm_get_page_prot(unsigned long vm_flags);
+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
void vma_set_page_prot(struct vm_area_struct *vma);
#else
-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
{
return __pgprot(0);
}
@@ -3517,9 +3529,9 @@ vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t pgprot);
vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
- pfn_t pfn);
+ unsigned long pfn);
vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
- unsigned long addr, pfn_t pfn);
+ unsigned long addr, unsigned long pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
@@ -4048,14 +4060,14 @@ unsigned long wp_shared_mapping_range(struct address_space *mapping,
#endif
#ifdef CONFIG_ANON_VMA_NAME
-int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
- unsigned long len_in,
- struct anon_vma_name *anon_name);
+int set_anon_vma_name(unsigned long addr, unsigned long size,
+ const char __user *uname);
#else
-static inline int
-madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
- unsigned long len_in, struct anon_vma_name *anon_name) {
- return 0;
+static inline
+int set_anon_vma_name(unsigned long addr, unsigned long size,
+ const char __user *uname)
+{
+ return -EINVAL;
}
#endif
@@ -4177,15 +4189,34 @@ int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status);
#define PP_MAGIC_MASK ~(PP_DMA_INDEX_MASK | 0x3UL)
#ifdef CONFIG_PAGE_POOL
-static inline bool page_pool_page_is_pp(struct page *page)
+static inline bool page_pool_page_is_pp(const struct page *page)
{
return (page->pp_magic & PP_MAGIC_MASK) == PP_SIGNATURE;
}
#else
-static inline bool page_pool_page_is_pp(struct page *page)
+static inline bool page_pool_page_is_pp(const struct page *page)
{
return false;
}
#endif
+#define PAGE_SNAPSHOT_FAITHFUL (1 << 0)
+#define PAGE_SNAPSHOT_PG_BUDDY (1 << 1)
+#define PAGE_SNAPSHOT_PG_IDLE (1 << 2)
+
+struct page_snapshot {
+ struct folio folio_snapshot;
+ struct page page_snapshot;
+ unsigned long pfn;
+ unsigned long idx;
+ unsigned long flags;
+};
+
+static inline bool snapshot_page_is_faithful(const struct page_snapshot *ps)
+{
+ return ps->flags & PAGE_SNAPSHOT_FAITHFUL;
+}
+
+void snapshot_page(struct page_snapshot *ps, const struct page *page);
+
#endif /* _LINUX_MM_H */
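The new snapshot helper gives debug/dump code a stable copy of a page and its folio even while the page may change underneath; a hypothetical dump-side use (caller and message are illustrative):

	struct page_snapshot ps;

	snapshot_page(&ps, page);
	if (!snapshot_page_is_faithful(&ps))
		pr_warn("pfn %lu changed while snapshotting; dump may be stale\n",
			ps.pfn);
	/* ps.page_snapshot and ps.folio_snapshot are now safe to inspect. */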
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 0f0662157066..08bc2442db93 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -105,7 +105,6 @@ struct page {
unsigned int order;
};
};
- /* See page-flags.h for PAGE_MAPPING_FLAGS */
struct address_space *mapping;
union {
pgoff_t __folio_index; /* Our offset within mapping. */
@@ -1086,7 +1085,7 @@ struct mm_struct {
unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
unsigned long stack_vm; /* VM_STACK */
- unsigned long def_flags;
+ vm_flags_t def_flags;
/**
* @write_protect_seq: Locked when any thread is write
diff --git a/include/linux/mman.h b/include/linux/mman.h
index f4c6346a8fcd..de9e8e6229a4 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -137,7 +137,7 @@ static inline bool arch_validate_flags(unsigned long flags)
/*
* Combine the mmap "prot" argument into "vm_flags" used internally.
*/
-static inline unsigned long
+static inline vm_flags_t
calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
{
return _calc_vm_trans(prot, PROT_READ, VM_READ ) |
@@ -149,7 +149,7 @@ calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
/*
* Combine the mmap "flags" argument into "vm_flags" used internally.
*/
-static inline unsigned long
+static inline vm_flags_t
calc_vm_flag_bits(struct file *file, unsigned long flags)
{
return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) |
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 5da384bd0a26..1f4f44951abe 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -309,6 +309,17 @@ void vma_mark_detached(struct vm_area_struct *vma);
struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
unsigned long address);
+/*
+ * Locks the next VMA pointed to by the iterator. Confirms that the locked
+ * VMA has not been modified, and retries under mmap_lock protection if a
+ * modification was detected. Should be called from an RCU read section.
+ * Returns either a valid locked VMA, NULL if there are no more VMAs, or
+ * -EINTR if the process was interrupted.
+ */
+struct vm_area_struct *lock_next_vma(struct mm_struct *mm,
+ struct vma_iterator *iter,
+ unsigned long address);
+
#else /* CONFIG_PER_VMA_LOCK */
static inline void mm_lock_seqcount_init(struct mm_struct *mm) {}
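A hedged sketch of the iteration the comment above lock_next_vma() describes; real callers keep a more involved locking discipline, and the unlock helper shown is an assumption:

	struct vm_area_struct *vma;

	rcu_read_lock();
	vma = lock_next_vma(mm, &vmi, addr);
	rcu_read_unlock();

	if (IS_ERR(vma))
		return PTR_ERR(vma);	/* -EINTR: interrupted */
	if (!vma)
		return 0;		/* no more VMAs */
	/* ... use the locked VMA, then drop it, e.g. with vma_end_read(vma) */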
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 7cddfdac2f57..fe3d6d98f8da 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -76,6 +76,7 @@
#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
#define SDIO_DEVICE_ID_BROADCOM_43439 0xa9af
#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf
+#define SDIO_DEVICE_ID_BROADCOM_43751 0xaae7
#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752 0xaae8
#define SDIO_VENDOR_ID_CYPRESS 0x04b4
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index a0a3894900ed..14a45979cccc 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -89,6 +89,17 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi);
} \
unlikely(__ret_warn_once); \
})
+#define VM_WARN_ON_ONCE_VMA(cond, vma) ({ \
+ static bool __section(".data..once") __warned; \
+ int __ret_warn_once = !!(cond); \
+ \
+ if (unlikely(__ret_warn_once && !__warned)) { \
+ dump_vma(vma); \
+ __warned = true; \
+ WARN_ON(1); \
+ } \
+ unlikely(__ret_warn_once); \
+})
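Like the existing VM_WARN_ON_ONCE_MM()/VM_WARN_ON_ONCE_FOLIO() variants, the new macro dumps the offending VMA once before warning; a hypothetical call site:

	VM_WARN_ON_ONCE_VMA(vma->vm_start >= vma->vm_end, vma);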
#define VM_WARN_ON_VMG(cond, vmg) ({ \
int __ret_warn = !!(cond); \
\
@@ -115,6 +126,7 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi);
#define VM_WARN_ON_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE_MM(cond, mm) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_ONCE_VMA(cond, vma) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_VMG(cond, vmg) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 283913d42d7b..0c5da9141983 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -38,19 +38,19 @@
#define NR_PAGE_ORDERS (MAX_PAGE_ORDER + 1)
/* Defines the order for the number of pages that have a migrate type. */
-#ifndef CONFIG_PAGE_BLOCK_ORDER
-#define PAGE_BLOCK_ORDER MAX_PAGE_ORDER
+#ifndef CONFIG_PAGE_BLOCK_MAX_ORDER
+#define PAGE_BLOCK_MAX_ORDER MAX_PAGE_ORDER
#else
-#define PAGE_BLOCK_ORDER CONFIG_PAGE_BLOCK_ORDER
-#endif /* CONFIG_PAGE_BLOCK_ORDER */
+#define PAGE_BLOCK_MAX_ORDER CONFIG_PAGE_BLOCK_MAX_ORDER
+#endif /* CONFIG_PAGE_BLOCK_MAX_ORDER */
/*
* The MAX_PAGE_ORDER, which defines the max order of pages to be allocated
- * by the buddy allocator, has to be larger or equal to the PAGE_BLOCK_ORDER,
+ * by the buddy allocator, has to be larger or equal to the PAGE_BLOCK_MAX_ORDER,
* which defines the order for the number of pages that can have a migrate type
*/
-#if (PAGE_BLOCK_ORDER > MAX_PAGE_ORDER)
-#error MAX_PAGE_ORDER must be >= PAGE_BLOCK_ORDER
+#if (PAGE_BLOCK_MAX_ORDER > MAX_PAGE_ORDER)
+#error MAX_PAGE_ORDER must be >= PAGE_BLOCK_MAX_ORDER
#endif
/*
@@ -79,6 +79,9 @@ enum migratetype {
* __free_pageblock_cma() function.
*/
MIGRATE_CMA,
+ __MIGRATE_TYPE_END = MIGRATE_CMA,
+#else
+ __MIGRATE_TYPE_END = MIGRATE_HIGHATOMIC,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
MIGRATE_ISOLATE, /* can't allocate from here */
@@ -92,8 +95,12 @@ extern const char * const migratetype_names[MIGRATE_TYPES];
#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
-# define is_migrate_cma_folio(folio, pfn) (MIGRATE_CMA == \
- get_pfnblock_flags_mask(&folio->page, pfn, MIGRATETYPE_MASK))
+/*
+ * __dump_folio() in mm/debug.c passes a pointer to an on-stack struct folio,
+ * so folio_pfn() cannot be used and the pfn must be passed in explicitly.
+ */
+# define is_migrate_cma_folio(folio, pfn) \
+ (get_pfnblock_migratetype(&folio->page, pfn) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
# define is_migrate_cma_page(_page) false
@@ -122,14 +129,12 @@ static inline bool migratetype_is_mergeable(int mt)
extern int page_group_by_mobility_disabled;
-#define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)
+#define get_pageblock_migratetype(page) \
+ get_pfnblock_migratetype(page, page_to_pfn(page))
-#define get_pageblock_migratetype(page) \
- get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)
+#define folio_migratetype(folio) \
+ get_pageblock_migratetype(&folio->page)
-#define folio_migratetype(folio) \
- get_pfnblock_flags_mask(&folio->page, folio_pfn(folio), \
- MIGRATETYPE_MASK)
struct free_area {
struct list_head free_list[MIGRATE_TYPES];
unsigned long nr_free;
@@ -201,7 +206,6 @@ enum node_stat_item {
NR_FILE_PAGES,
NR_FILE_DIRTY,
NR_WRITEBACK,
- NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */
NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */
NR_SHMEM_THPS,
NR_SHMEM_PMDMAPPED,
diff --git a/include/linux/module.h b/include/linux/module.h
index 5faa1fb1f4b4..a7cac01d95e7 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -14,6 +14,7 @@
#include <linux/buildid.h>
#include <linux/compiler.h>
#include <linux/cache.h>
+#include <linux/cleanup.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/elf.h>
@@ -304,7 +305,6 @@ struct notifier_block;
#ifdef CONFIG_MODULES
-extern int modules_disabled; /* for sysctl */
/* Get/put a kernel symbol (calls must be symmetric) */
void *__symbol_get(const char *symbol);
void *__symbol_get_gpl(const char *symbol);
@@ -539,7 +539,7 @@ struct module {
struct trace_eval_map **trace_evals;
unsigned int num_trace_evals;
#endif
-#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+#ifdef CONFIG_DYNAMIC_FTRACE
unsigned int num_ftrace_callsites;
unsigned long *ftrace_callsites;
#endif
@@ -1019,4 +1019,7 @@ static inline unsigned long find_kallsyms_symbol_value(struct module *mod,
#endif /* CONFIG_MODULES && CONFIG_KALLSYMS */
+/* Define __free(module_put) macro for struct module *. */
+DEFINE_FREE(module_put, struct module *, if (_T) module_put(_T))
+
#endif /* _LINUX_MODULE_H */
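The new DEFINE_FREE(module_put, ...) entry lets callers hold a module reference with cleanup.h scope-based release; a minimal sketch assuming an `owner` module pointer and the usual try_module_get() pairing:

	struct module *mod __free(module_put) = NULL;

	if (!try_module_get(owner))
		return -ENODEV;
	mod = owner;
	/* on any return path below, module_put(owner) now runs automatically */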
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index 63ef5191cc57..fddafdc168f7 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -31,6 +31,7 @@ extern int ip6_mroute_getsockopt(struct sock *, int, sockptr_t, sockptr_t);
extern int ip6_mr_input(struct sk_buff *skb);
extern int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
extern int ip6_mr_init(void);
+extern int ip6_mr_output(struct net *net, struct sock *sk, struct sk_buff *skb);
extern void ip6_mr_cleanup(void);
int ip6mr_ioctl(struct sock *sk, int cmd, void *arg);
#else
@@ -58,6 +59,12 @@ static inline int ip6_mr_init(void)
return 0;
}
+static inline int
+ip6_mr_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+ return ip6_output(net, sk, skb);
+}
+
static inline void ip6_mr_cleanup(void)
{
return;
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 77227d23ea84..e5e86a8529fb 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -707,7 +707,10 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
struct irq_domain *parent);
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
+u32 pci_msi_map_rid_ctlr_node(struct pci_dev *pdev, struct device_node **node);
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
+void pci_msix_prepare_desc(struct irq_domain *domain, msi_alloc_info_t *arg,
+ struct msi_desc *desc);
#else /* CONFIG_PCI_MSI */
static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 1b56796f6cb3..288ef765a44e 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -8,15 +8,15 @@
#ifndef __LINUX_MTD_MAP_H__
#define __LINUX_MTD_MAP_H__
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/string.h>
#include <linux/bug.h>
-#include <linux/kernel.h>
#include <linux/io.h>
-
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/types.h>
#include <linux/unaligned.h>
-#include <asm/barrier.h>
+
+struct device_node;
+struct module;
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
#define map_bankwidth(map) 1
@@ -188,6 +188,7 @@ typedef union {
of living.
*/
+struct mtd_chip_driver;
struct map_info {
const char *name;
unsigned long size;
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 15eaa09da998..27a45bdab7ec 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -62,30 +62,33 @@
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(addr, ndummy, buf, len, ...) \
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
SPI_MEM_OP_DATA_IN(len, buf, 1), \
- SPI_MEM_OP_MAX_FREQ(__VA_ARGS__ + 0))
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1), \
- SPI_MEM_OP_ADDR(2, addr, 1), \
- SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 1))
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 1), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_1S_OP(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_1S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 1))
+ SPI_MEM_OP_DATA_IN(len, buf, 1), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_FAST_3A_1S_1S_1S_OP(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_FAST_3A_1S_1S_1S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 1))
+ SPI_MEM_OP_DATA_IN(len, buf, 1), \
+ SPI_MEM_OP_MAX_FREQ(freq))
#define SPINAND_PAGE_READ_FROM_CACHE_1S_1D_1D_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x0d, 1), \
@@ -94,17 +97,19 @@
SPI_MEM_DTR_OP_DATA_IN(len, buf, 1), \
SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 2))
+ SPI_MEM_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_2S_OP(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_2S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 2))
+ SPI_MEM_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
#define SPINAND_PAGE_READ_FROM_CACHE_1S_1D_2D_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x3d, 1), \
@@ -113,18 +118,19 @@
SPI_MEM_DTR_OP_DATA_IN(len, buf, 2), \
SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(addr, ndummy, buf, len, ...) \
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
SPI_MEM_OP_ADDR(2, addr, 2), \
SPI_MEM_OP_DUMMY(ndummy, 2), \
SPI_MEM_OP_DATA_IN(len, buf, 2), \
- SPI_MEM_OP_MAX_FREQ(__VA_ARGS__ + 0))
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_2S_2S_OP(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_2S_2S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
SPI_MEM_OP_ADDR(3, addr, 2), \
SPI_MEM_OP_DUMMY(ndummy, 2), \
- SPI_MEM_OP_DATA_IN(len, buf, 2))
+ SPI_MEM_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
#define SPINAND_PAGE_READ_FROM_CACHE_1S_2D_2D_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xbd, 1), \
@@ -133,17 +139,19 @@
SPI_MEM_DTR_OP_DATA_IN(len, buf, 2), \
SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 4))
+ SPI_MEM_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_4S_OP(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_4S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 4))
+ SPI_MEM_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
#define SPINAND_PAGE_READ_FROM_CACHE_1S_1D_4D_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x6d, 1), \
@@ -152,18 +160,19 @@
SPI_MEM_DTR_OP_DATA_IN(len, buf, 4), \
SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(addr, ndummy, buf, len, ...) \
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
SPI_MEM_OP_ADDR(2, addr, 4), \
SPI_MEM_OP_DUMMY(ndummy, 4), \
SPI_MEM_OP_DATA_IN(len, buf, 4), \
- SPI_MEM_OP_MAX_FREQ(__VA_ARGS__ + 0))
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_4S_4S_OP(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_4S_4S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
SPI_MEM_OP_ADDR(3, addr, 4), \
SPI_MEM_OP_DUMMY(ndummy, 4), \
- SPI_MEM_OP_DATA_IN(len, buf, 4))
+ SPI_MEM_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
#define SPINAND_PAGE_READ_FROM_CACHE_1S_4D_4D_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xed, 1), \
@@ -484,6 +493,7 @@ struct spinand_user_otp {
* @op_variants.update_cache: variants of the update-cache operation
* @select_target: function used to select a target/die. Required only for
* multi-die chips
+ * @configure_chip: Align the chip configuration with the core settings
* @set_cont_read: enable/disable continuous cached reads
* @fact_otp: SPI NAND factory OTP info.
* @user_otp: SPI NAND user OTP info.
@@ -507,6 +517,7 @@ struct spinand_info {
} op_variants;
int (*select_target)(struct spinand_device *spinand,
unsigned int target);
+ int (*configure_chip)(struct spinand_device *spinand);
int (*set_cont_read)(struct spinand_device *spinand,
bool enable);
struct spinand_fact_otp fact_otp;
@@ -539,6 +550,9 @@ struct spinand_info {
#define SPINAND_SELECT_TARGET(__func) \
.select_target = __func
+#define SPINAND_CONFIGURE_CHIP(__configure_chip) \
+ .configure_chip = __configure_chip
+
#define SPINAND_CONT_READ(__set_cont_read) \
.set_cont_read = __set_cont_read
@@ -607,6 +621,7 @@ struct spinand_dirmap {
* passed in spi_mem_op be DMA-able, so we can't base the bufs on
* the stack
* @manufacturer: SPI NAND manufacturer information
+ * @configure_chip: Align the chip configuration with the core settings
* @cont_read_possible: Field filled by the core once the whole system
* configuration is known to tell whether continuous reads are
* suitable to use or not in general with this chip/configuration.
@@ -647,6 +662,7 @@ struct spinand_device {
const struct spinand_manufacturer *manufacturer;
void *priv;
+ int (*configure_chip)(struct spinand_device *spinand);
bool cont_read_possible;
int (*set_cont_read)(struct spinand_device *spinand,
bool enable);
@@ -723,7 +739,9 @@ int spinand_match_and_init(struct spinand_device *spinand,
enum spinand_readid_method rdid_method);
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val);
+int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val);
int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val);
+int spinand_write_enable_op(struct spinand_device *spinand);
int spinand_select_target(struct spinand_device *spinand, unsigned int target);
int spinand_wait(struct spinand_device *spinand, unsigned long initial_delay_us,
diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h
index 562f92504f2b..c3f79c4be1cc 100644
--- a/include/linux/mtd/ubi.h
+++ b/include/linux/mtd/ubi.h
@@ -250,7 +250,6 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum);
int ubi_leb_map(struct ubi_volume_desc *desc, int lnum);
int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum);
int ubi_sync(int ubi_num);
-int ubi_flush(int ubi_num, int vol_id, int lnum);
/*
* This function is the same as the 'ubi_leb_read()' function, but it does not
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index a039fa8c1780..00afd341d293 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -126,11 +126,11 @@ do { \
#ifdef CONFIG_DEBUG_MUTEXES
-int __devm_mutex_init(struct device *dev, struct mutex *lock);
+int __must_check __devm_mutex_init(struct device *dev, struct mutex *lock);
#else
-static inline int __devm_mutex_init(struct device *dev, struct mutex *lock)
+static inline int __must_check __devm_mutex_init(struct device *dev, struct mutex *lock)
{
/*
* When CONFIG_DEBUG_MUTEXES is off mutex_destroy() is just a nop so
@@ -141,14 +141,17 @@ static inline int __devm_mutex_init(struct device *dev, struct mutex *lock)
#endif
-#define devm_mutex_init(dev, mutex) \
+#define __mutex_init_ret(mutex) \
({ \
typeof(mutex) mutex_ = (mutex); \
\
mutex_init(mutex_); \
- __devm_mutex_init(dev, mutex_); \
+ mutex_; \
})
+#define devm_mutex_init(dev, mutex) \
+ __devm_mutex_init(dev, __mutex_init_ret(mutex))
+
/*
* See kernel/locking/mutex.c for detailed documentation of these APIs.
* Also see Documentation/locking/mutex-design.rst.
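With __devm_mutex_init() now __must_check, devm_mutex_init() callers have to handle the (previously ignorable) failure; a typical probe() fragment, with the device and lock names assumed:

	int err;

	err = devm_mutex_init(&pdev->dev, &priv->lock);
	if (err)
		return err;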
diff --git a/include/linux/net/intel/iidc_rdma_idpf.h b/include/linux/net/intel/iidc_rdma_idpf.h
new file mode 100644
index 000000000000..bab697e18fd6
--- /dev/null
+++ b/include/linux/net/intel/iidc_rdma_idpf.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2025 Intel Corporation. */
+
+#ifndef _IIDC_RDMA_IDPF_H_
+#define _IIDC_RDMA_IDPF_H_
+
+#include <linux/auxiliary_bus.h>
+
+/* struct to be populated by core LAN PCI driver */
+struct iidc_rdma_vport_dev_info {
+ struct auxiliary_device *adev;
+ struct auxiliary_device *core_adev;
+ struct net_device *netdev;
+ u16 vport_id;
+};
+
+struct iidc_rdma_vport_auxiliary_dev {
+ struct auxiliary_device adev;
+ struct iidc_rdma_vport_dev_info *vdev_info;
+};
+
+struct iidc_rdma_vport_auxiliary_drv {
+ struct auxiliary_driver adrv;
+ void (*event_handler)(struct iidc_rdma_vport_dev_info *vdev,
+ struct iidc_rdma_event *event);
+};
+
+/* struct to be populated by core LAN PCI driver */
+enum iidc_function_type {
+ IIDC_FUNCTION_TYPE_PF,
+ IIDC_FUNCTION_TYPE_VF,
+};
+
+struct iidc_rdma_lan_mapped_mem_region {
+ u8 __iomem *region_addr;
+ __le64 size;
+ __le64 start_offset;
+};
+
+struct iidc_rdma_priv_dev_info {
+ struct msix_entry *msix_entries;
+ u16 msix_count; /* How many vectors are reserved for this device */
+ enum iidc_function_type ftype;
+ __le16 num_memory_regions;
+ struct iidc_rdma_lan_mapped_mem_region *mapped_mem_regions;
+};
+
+int idpf_idc_vport_dev_ctrl(struct iidc_rdma_core_dev_info *cdev_info, bool up);
+int idpf_idc_request_reset(struct iidc_rdma_core_dev_info *cdev_info,
+ enum iidc_rdma_reset_type __always_unused reset_type);
+int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info,
+ u8 *send_msg, u16 msg_size,
+ u8 *recv_msg, u16 *recv_len);
+
+#endif /* _IIDC_RDMA_IDPF_H_ */
diff --git a/include/linux/net/intel/libie/adminq.h b/include/linux/net/intel/libie/adminq.h
new file mode 100644
index 000000000000..012b5d499c1a
--- /dev/null
+++ b/include/linux/net/intel/libie/adminq.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2025 Intel Corporation */
+
+#ifndef __LIBIE_ADMINQ_H
+#define __LIBIE_ADMINQ_H
+
+#include <linux/build_bug.h>
+#include <linux/types.h>
+
+#define LIBIE_CHECK_STRUCT_LEN(n, X) \
+ static_assert((n) == sizeof(struct X))
+
+/**
+ * struct libie_aqc_generic - Generic structure used in adminq communication
+ * @param0: generic parameter high 32bit
+ * @param1: generic parameter lower 32bit
+ * @addr_high: generic address high 32bit
+ * @addr_low: generic address lower 32bit
+ */
+struct libie_aqc_generic {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+LIBIE_CHECK_STRUCT_LEN(16, libie_aqc_generic);
+
+/**
+ * struct libie_aqc_get_ver - Used in command get version (direct 0x0001)
+ * @rom_ver: rom version
+ * @fw_build: number corresponding to firmware build
+ * @fw_branch: branch identifier of firmware version
+ * @fw_major: major number of firmware version
+ * @fw_minor: minor number of firmware version
+ * @fw_patch: patch of firmware version
+ * @api_branch: branch identifier of API version
+ * @api_major: major number of API version
+ * @api_minor: minor number of API version
+ * @api_patch: patch of API version
+ */
+struct libie_aqc_get_ver {
+ __le32 rom_ver;
+ __le32 fw_build;
+ u8 fw_branch;
+ u8 fw_major;
+ u8 fw_minor;
+ u8 fw_patch;
+ u8 api_branch;
+ u8 api_major;
+ u8 api_minor;
+ u8 api_patch;
+};
+LIBIE_CHECK_STRUCT_LEN(16, libie_aqc_get_ver);
+
+/**
+ * struct libie_aqc_driver_ver - Used in command send driver version
+ * (indirect 0x0002)
+ * @major_ver: driver major version
+ * @minor_ver: driver minor version
+ * @build_ver: driver build version
+ * @subbuild_ver: driver subbuild version
+ * @reserved: reserved for future use
+ * @addr_high: high part of response address buff
+ * @addr_low: low part of response address buff
+ */
+struct libie_aqc_driver_ver {
+ u8 major_ver;
+ u8 minor_ver;
+ u8 build_ver;
+ u8 subbuild_ver;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+LIBIE_CHECK_STRUCT_LEN(16, libie_aqc_driver_ver);
+
+enum libie_aq_res_id {
+ LIBIE_AQC_RES_ID_NVM = 1,
+ LIBIE_AQC_RES_ID_SDP = 2,
+ LIBIE_AQC_RES_ID_CHNG_LOCK = 3,
+ LIBIE_AQC_RES_ID_GLBL_LOCK = 4,
+};
+
+enum libie_aq_res_access_type {
+ LIBIE_AQC_RES_ACCESS_READ = 1,
+ LIBIE_AQC_RES_ACCESS_WRITE = 2,
+};
+
+#define LIBIE_AQ_RES_NVM_READ_DFLT_TIMEOUT_MS 3000
+#define LIBIE_AQ_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000
+#define LIBIE_AQ_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000
+#define LIBIE_AQ_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000
+
+#define LIBIE_AQ_RES_GLBL_SUCCESS 0
+#define LIBIE_AQ_RES_GLBL_IN_PROG 1
+#define LIBIE_AQ_RES_GLBL_DONE 2
+
+/**
+ * struct libie_aqc_req_res - Request resource ownership
+ * @res_id: resource ID (look at enum definition above)
+ * @access_type: read or write (enum definition above)
+ * @timeout: Upon successful completion, FW writes this value and the driver
+ * is expected to release the resource before the timeout expires. The value
+ * is provided in milliseconds.
+ * @res_number: for SDP, this is the pin ID of the SDP
+ * @status: only used for LIBIE_AQC_RES_ID_GLBL_LOCK; reserved for all other
+ * resource IDs
+ * @reserved: reserved for future use
+ *
+ * Used in commands:
+ * request resource ownership (direct 0x0008)
+ * request resource ownership (direct 0x0009)
+ */
+struct libie_aqc_req_res {
+ __le16 res_id;
+ __le16 access_type;
+
+ __le32 timeout;
+ __le32 res_number;
+ __le16 status;
+ u8 reserved[2];
+};
+LIBIE_CHECK_STRUCT_LEN(16, libie_aqc_req_res);
+
+/**
+ * struct libie_aqc_list_caps - Getting capabilities
+ * @cmd_flags: command flags
+ * @pf_index: index of PF to get caps from
+ * @reserved: reserved for future use
+ * @count: number of capabilities records
+ * @addr_high: high part of response address buff
+ * @addr_low: low part of response address buff
+ *
+ * Used in commands:
+ * get function capabilities (indirect 0x000A)
+ * get device capabilities (indirect 0x000B)
+ */
+struct libie_aqc_list_caps {
+ u8 cmd_flags;
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+LIBIE_CHECK_STRUCT_LEN(16, libie_aqc_list_caps);
+
+/* Device/Function buffer entry, repeated per reported capability */
+#define LIBIE_AQC_CAPS_SWITCH_MODE 0x0001
+#define LIBIE_AQC_CAPS_MNG_MODE 0x0002
+#define LIBIE_AQC_CAPS_NPAR_ACTIVE 0x0003
+#define LIBIE_AQC_CAPS_OS2BMC_CAP 0x0004
+#define LIBIE_AQC_CAPS_VALID_FUNCTIONS 0x0005
+#define LIBIE_AQC_MAX_VALID_FUNCTIONS 0x8
+#define LIBIE_AQC_CAPS_SRIOV 0x0012
+#define LIBIE_AQC_CAPS_VF 0x0013
+#define LIBIE_AQC_CAPS_VMDQ 0x0014
+#define LIBIE_AQC_CAPS_8021QBG 0x0015
+#define LIBIE_AQC_CAPS_8021QBR 0x0016
+#define LIBIE_AQC_CAPS_VSI 0x0017
+#define LIBIE_AQC_CAPS_DCB 0x0018
+#define LIBIE_AQC_CAPS_FCOE 0x0021
+#define LIBIE_AQC_CAPS_ISCSI 0x0022
+#define LIBIE_AQC_CAPS_RSS 0x0040
+#define LIBIE_AQC_CAPS_RXQS 0x0041
+#define LIBIE_AQC_CAPS_TXQS 0x0042
+#define LIBIE_AQC_CAPS_MSIX 0x0043
+#define LIBIE_AQC_CAPS_VF_MSIX 0x0044
+#define LIBIE_AQC_CAPS_FD 0x0045
+#define LIBIE_AQC_CAPS_1588 0x0046
+#define LIBIE_AQC_CAPS_MAX_MTU 0x0047
+#define LIBIE_AQC_CAPS_NVM_VER 0x0048
+#define LIBIE_AQC_CAPS_PENDING_NVM_VER 0x0049
+#define LIBIE_AQC_CAPS_OROM_VER 0x004A
+#define LIBIE_AQC_CAPS_PENDING_OROM_VER 0x004B
+#define LIBIE_AQC_CAPS_NET_VER 0x004C
+#define LIBIE_AQC_CAPS_PENDING_NET_VER 0x004D
+#define LIBIE_AQC_CAPS_RDMA 0x0051
+#define LIBIE_AQC_CAPS_LED 0x0061
+#define LIBIE_AQC_CAPS_SDP 0x0062
+#define LIBIE_AQC_CAPS_MDIO 0x0063
+#define LIBIE_AQC_CAPS_WSR_PROT 0x0064
+#define LIBIE_AQC_CAPS_SENSOR_READING 0x0067
+#define LIBIE_AQC_INLINE_IPSEC 0x0070
+#define LIBIE_AQC_CAPS_NUM_ENABLED_PORTS 0x0072
+#define LIBIE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076
+#define LIBIE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
+#define LIBIE_AQC_CAPS_NVM_MGMT 0x0080
+#define LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG0 0x0081
+#define LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG1 0x0082
+#define LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG2 0x0083
+#define LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG3 0x0084
+#define LIBIE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE 0x0085
+#define LIBIE_AQC_CAPS_NAC_TOPOLOGY 0x0087
+#define LIBIE_AQC_CAPS_FW_LAG_SUPPORT 0x0092
+#define LIBIE_AQC_BIT_ROCEV2_LAG 0x01
+#define LIBIE_AQC_BIT_SRIOV_LAG 0x02
+#define LIBIE_AQC_CAPS_FLEX10 0x00F1
+#define LIBIE_AQC_CAPS_CEM 0x00F2
+
+/**
+ * struct libie_aqc_list_caps_elem - Getting list of caps elements
+ * @cap: one of the LIBIE_AQC_CAPS_* defines listed above
+ * @major_ver: major version
+ * @minor_ver: minor version
+ * @number: number of resources described by this capability
+ * @logical_id: logical ID, only meaningful for some types of resources
+ * @phys_id: physical ID, only meaningful for some types of resources
+ * @rsvd1: reserved for future use
+ * @rsvd2: reserved for future use
+ */
+struct libie_aqc_list_caps_elem {
+ __le16 cap;
+
+ u8 major_ver;
+ u8 minor_ver;
+ __le32 number;
+ __le32 logical_id;
+ __le32 phys_id;
+ __le64 rsvd1;
+ __le64 rsvd2;
+};
+LIBIE_CHECK_STRUCT_LEN(32, libie_aqc_list_caps_elem);
+
+/**
+ * struct libie_aq_desc - Admin Queue (AQ) descriptor
+ * @flags: LIBIE_AQ_FLAG_* flags
+ * @opcode: AQ command opcode
+ * @datalen: length in bytes of indirect/external data buffer
+ * @retval: return value from firmware
+ * @cookie_high: opaque data high-half
+ * @cookie_low: opaque data low-half
+ * @params: command-specific parameters
+ *
+ * Descriptor format for commands the driver posts on the Admin Transmit Queue
+ * (ATQ). The firmware writes back onto the command descriptor and returns
+ * the result of the command. Asynchronous events that are not an immediate
+ * result of the command are written to the Admin Receive Queue (ARQ) using
+ * the same descriptor format. Descriptors are in little-endian notation with
+ * 32-bit words.
+ */
+struct libie_aq_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ u8 raw[16];
+ struct libie_aqc_generic generic;
+ struct libie_aqc_get_ver get_ver;
+ struct libie_aqc_driver_ver driver_ver;
+ struct libie_aqc_req_res res_owner;
+ struct libie_aqc_list_caps get_cap;
+ } params;
+};
+LIBIE_CHECK_STRUCT_LEN(32, libie_aq_desc);
+
+/* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */
+#define LIBIE_AQ_LG_BUF 512
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+#define LIBIE_AQ_FLAG_DD BIT(0) /* 0x1 */
+#define LIBIE_AQ_FLAG_CMP BIT(1) /* 0x2 */
+#define LIBIE_AQ_FLAG_ERR BIT(2) /* 0x4 */
+#define LIBIE_AQ_FLAG_VFE BIT(3) /* 0x8 */
+#define LIBIE_AQ_FLAG_LB BIT(9) /* 0x200 */
+#define LIBIE_AQ_FLAG_RD BIT(10) /* 0x400 */
+#define LIBIE_AQ_FLAG_VFC BIT(11) /* 0x800 */
+#define LIBIE_AQ_FLAG_BUF BIT(12) /* 0x1000 */
+#define LIBIE_AQ_FLAG_SI BIT(13) /* 0x2000 */
+#define LIBIE_AQ_FLAG_EI BIT(14) /* 0x4000 */
+#define LIBIE_AQ_FLAG_FE BIT(15) /* 0x8000 */
+
+/* error codes */
+enum libie_aq_err {
+ LIBIE_AQ_RC_OK = 0, /* Success */
+ LIBIE_AQ_RC_EPERM = 1, /* Operation not permitted */
+ LIBIE_AQ_RC_ENOENT = 2, /* No such element */
+ LIBIE_AQ_RC_ESRCH = 3, /* Bad opcode */
+ LIBIE_AQ_RC_EIO = 5, /* I/O error */
+ LIBIE_AQ_RC_EAGAIN = 8, /* Try again */
+ LIBIE_AQ_RC_ENOMEM = 9, /* Out of memory */
+ LIBIE_AQ_RC_EACCES = 10, /* Permission denied */
+ LIBIE_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ LIBIE_AQ_RC_EEXIST = 13, /* Object already exists */
+ LIBIE_AQ_RC_EINVAL = 14, /* Invalid argument */
+ LIBIE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */
+ LIBIE_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ LIBIE_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ LIBIE_AQ_RC_ENOSEC = 24, /* Missing security manifest */
+ LIBIE_AQ_RC_EBADSIG = 25, /* Bad RSA signature */
+ LIBIE_AQ_RC_ESVN = 26, /* SVN number prohibits this package */
+ LIBIE_AQ_RC_EBADMAN = 27, /* Manifest hash mismatch */
+ LIBIE_AQ_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */
+};
+
+static inline void *libie_aq_raw(struct libie_aq_desc *desc)
+{
+ return &desc->params.raw;
+}
+
+const char *libie_aq_str(enum libie_aq_err err);
+
+#endif /* __LIBIE_ADMINQ_H */
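A hedged sketch of building one of these descriptors for the "get version" command documented above (opcode 0x0001, direct); flag handling and the completion path are omitted, and the snippet is illustrative only:

	struct libie_aq_desc desc = {
		.opcode = cpu_to_le16(0x0001),	/* get version, direct */
	};
	struct libie_aqc_get_ver *ver = libie_aq_raw(&desc);

	/* After the firmware completes the command, desc.retval holds the
	 * libie_aq_err code and ver->fw_major/fw_minor/fw_patch the version. */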
diff --git a/include/linux/net/intel/libie/pctype.h b/include/linux/net/intel/libie/pctype.h
new file mode 100644
index 000000000000..d783417fbf36
--- /dev/null
+++ b/include/linux/net/intel/libie/pctype.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2025 Intel Corporation */
+
+#ifndef __LIBIE_PCTYPE_H
+#define __LIBIE_PCTYPE_H
+
+/* Packet Classifier Type indexes, used to set the xxQF_HENA registers. Also
+ * communicated over the virtchnl API as part of struct virtchnl_rss_hashena.
+ */
+enum libie_filter_pctype {
+ /* Note: Values 0-28 are reserved for future use.
+ * Values 29, 30, and 32 are not supported on XL710 and X710.
+ */
+ LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
+ LIBIE_FILTER_PCTYPE_FRAG_IPV4 = 36,
+ /* Note: Values 37-38 are reserved for future use.
+ * Values 39, 40, and 42 are not supported on XL710 and X710.
+ */
+ LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
+ LIBIE_FILTER_PCTYPE_FRAG_IPV6 = 46,
+ /* Note: Value 47 is reserved for future use */
+ LIBIE_FILTER_PCTYPE_FCOE_OX = 48,
+ LIBIE_FILTER_PCTYPE_FCOE_RX = 49,
+ LIBIE_FILTER_PCTYPE_FCOE_OTHER = 50,
+ /* Note: Values 51-62 are reserved for future use */
+ LIBIE_FILTER_PCTYPE_L2_PAYLOAD = 63
+};
+
+#endif /* __LIBIE_PCTYPE_H */
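Since the PCTYPE indexes are bit positions in the xxQF_HENA registers (and in virtchnl_rss_hashena), a driver builds its hash-enable mask with BIT_ULL(); an illustrative TCP/UDP default mask:

	u64 hena = BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP) |
		   BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP) |
		   BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP) |
		   BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP);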
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index adb14db25798..5e5de4b0a433 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -369,6 +369,7 @@ struct napi_config {
u64 irq_suspend_timeout;
u32 defer_hard_irqs;
cpumask_t affinity_mask;
+ u8 threaded;
unsigned int napi_id;
};
@@ -588,7 +589,9 @@ static inline bool napi_complete(struct napi_struct *n)
return napi_complete_done(n, 0);
}
-int dev_set_threaded(struct net_device *dev, bool threaded);
+void netif_threaded_enable(struct net_device *dev);
+int dev_set_threaded(struct net_device *dev,
+ enum netdev_napi_threaded threaded);
void napi_disable(struct napi_struct *n);
void napi_disable_locked(struct napi_struct *n);
@@ -1870,6 +1873,7 @@ enum netdev_reg_state {
* @addr_len: Hardware address length
* @upper_level: Maximum depth level of upper devices.
* @lower_level: Maximum depth level of lower devices.
+ * @threaded: napi threaded state.
* @neigh_priv_len: Used in neigh_alloc()
* @dev_id: Used to differentiate devices that share
* the same link layer address
@@ -2009,8 +2013,6 @@ enum netdev_reg_state {
* switch driver and used to set the phys state of the
* switch port.
*
- * @threaded: napi threaded mode is enabled
- *
* @irq_affinity_auto: driver wants the core to store and re-assign the IRQ
* affinity. Set by netif_enable_irq_affinity(), then
* the driver must create a persistent napi by
@@ -2246,6 +2248,7 @@ struct net_device {
unsigned char addr_len;
unsigned char upper_level;
unsigned char lower_level;
+ u8 threaded;
unsigned short neigh_priv_len;
unsigned short dev_id;
@@ -2388,7 +2391,7 @@ struct net_device {
struct dm_hw_stat_delta __rcu *dm_private;
#endif
struct device dev;
- const struct attribute_group *sysfs_groups[4];
+ const struct attribute_group *sysfs_groups[5];
const struct attribute_group *sysfs_rx_queue_group;
const struct rtnl_link_ops *rtnl_link_ops;
@@ -2427,7 +2430,6 @@ struct net_device {
struct sfp_bus *sfp_bus;
struct lock_class_key *qdisc_tx_busylock;
bool proto_down;
- bool threaded;
bool irq_affinity_auto;
bool rx_cpu_rmap_auto;
@@ -3016,6 +3018,16 @@ static inline void dev_dstats_rx_dropped(struct net_device *dev)
u64_stats_update_end(&dstats->syncp);
}
+static inline void dev_dstats_rx_dropped_add(struct net_device *dev,
+ unsigned int packets)
+{
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+ u64_stats_update_begin(&dstats->syncp);
+ u64_stats_add(&dstats->rx_drops, packets);
+ u64_stats_update_end(&dstats->syncp);
+}
+
static inline void dev_dstats_tx_add(struct net_device *dev,
unsigned int len)
{
@@ -3306,13 +3318,6 @@ static inline struct net_device *first_net_device(struct net *net)
net_device_entry(net->dev_base_head.next);
}
-static inline struct net_device *first_net_device_rcu(struct net *net)
-{
- struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
-
- return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
-}
-
int netdev_boot_setup_check(struct net_device *dev);
struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type,
const char *hwaddr);
@@ -3329,8 +3334,6 @@ int dev_get_iflink(const struct net_device *dev);
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
struct net_device_path_stack *stack);
-struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
- unsigned short mask);
struct net_device *dev_get_by_name(struct net *net, const char *name);
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
struct net_device *__dev_get_by_name(struct net *net, const char *name);
@@ -3340,7 +3343,7 @@ int netif_open(struct net_device *dev, struct netlink_ext_ack *extack);
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
void netif_close(struct net_device *dev);
void dev_close(struct net_device *dev);
-void dev_close_many(struct list_head *head, bool unlink);
+void netif_close_many(struct list_head *head, bool unlink);
void netif_disable_lro(struct net_device *dev);
void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
@@ -3393,6 +3396,8 @@ struct net_device *netdev_get_by_index(struct net *net, int ifindex,
netdevice_tracker *tracker, gfp_t gfp);
struct net_device *netdev_get_by_name(struct net *net, const char *name,
netdevice_tracker *tracker, gfp_t gfp);
+struct net_device *netdev_get_by_flags_rcu(struct net *net, netdevice_tracker *tracker,
+ unsigned short flags, unsigned short mask);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
void netdev_copy_name(struct net_device *dev, char *name);
@@ -4192,7 +4197,7 @@ int generic_hwtstamp_set_lower(struct net_device *dev,
struct kernel_hwtstamp_config *kernel_cfg,
struct netlink_ext_ack *extack);
int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata);
-unsigned int dev_get_flags(const struct net_device *);
+unsigned int netif_get_flags(const struct net_device *dev);
int __dev_change_flags(struct net_device *dev, unsigned int flags,
struct netlink_ext_ack *extack);
int netif_change_flags(struct net_device *dev, unsigned int flags,
@@ -4207,20 +4212,20 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
struct netlink_ext_ack *extack);
int dev_change_net_namespace(struct net_device *dev, struct net *net,
const char *pat);
-int __dev_set_mtu(struct net_device *, int);
+int __netif_set_mtu(struct net_device *dev, int new_mtu);
int netif_set_mtu(struct net_device *dev, int new_mtu);
int dev_set_mtu(struct net_device *, int);
-int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
- struct netlink_ext_ack *extack);
+int netif_pre_changeaddr_notify(struct net_device *dev, const char *addr,
+ struct netlink_ext_ack *extack);
int netif_set_mac_address(struct net_device *dev, struct sockaddr_storage *ss,
struct netlink_ext_ack *extack);
int dev_set_mac_address(struct net_device *dev, struct sockaddr_storage *ss,
struct netlink_ext_ack *extack);
int dev_set_mac_address_user(struct net_device *dev, struct sockaddr_storage *ss,
struct netlink_ext_ack *extack);
-int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
-int dev_get_port_parent_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid, bool recurse);
+int netif_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
+int netif_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid, bool recurse);
bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
@@ -5128,10 +5133,9 @@ void netdev_bonding_info_change(struct net_device *dev,
struct netdev_bonding_info *bonding_info);
#if IS_ENABLED(CONFIG_ETHTOOL_NETLINK)
-void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data);
+void ethtool_notify(struct net_device *dev, unsigned int cmd);
#else
-static inline void ethtool_notify(struct net_device *dev, unsigned int cmd,
- const void *data)
+static inline void ethtool_notify(struct net_device *dev, unsigned int cmd)
{
}
#endif
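
A hedged usage sketch for the netdev_get_by_flags_rcu() lookup added above. The wrapper name find_up_non_loopback() is made up for illustration, and it assumes the helper behaves like the other netdev_get_by_*() helpers: matching devices whose masked flags equal @flags and returning a tracked reference (dropped later with netdev_put()) or NULL.

#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>

/* Hypothetical helper: find a device in init_net that is UP but not loopback */
static struct net_device *find_up_non_loopback(netdevice_tracker *tracker)
{
	struct net_device *dev;

	rcu_read_lock();
	/* match IFF_UP set and IFF_LOOPBACK clear */
	dev = netdev_get_by_flags_rcu(&init_net, tracker, IFF_UP,
				      IFF_UP | IFF_LOOPBACK);
	rcu_read_unlock();

	/* the caller owns the tracked reference; release it with netdev_put() */
	return dev;
}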
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 5f896fcc074d..efbbfa770d66 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -92,6 +92,7 @@ enum nf_hook_ops_type {
NF_HOOK_OP_UNDEFINED,
NF_HOOK_OP_NF_TABLES,
NF_HOOK_OP_BPF,
+ NF_HOOK_OP_NFT_FT,
};
struct nf_hook_ops {
diff --git a/include/linux/netfilter/nf_conntrack_dccp.h b/include/linux/netfilter/nf_conntrack_dccp.h
deleted file mode 100644
index c509ed76e714..000000000000
--- a/include/linux/netfilter/nf_conntrack_dccp.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NF_CONNTRACK_DCCP_H
-#define _NF_CONNTRACK_DCCP_H
-
-/* Exposed to userspace over nfnetlink */
-enum ct_dccp_states {
- CT_DCCP_NONE,
- CT_DCCP_REQUEST,
- CT_DCCP_RESPOND,
- CT_DCCP_PARTOPEN,
- CT_DCCP_OPEN,
- CT_DCCP_CLOSEREQ,
- CT_DCCP_CLOSING,
- CT_DCCP_TIMEWAIT,
- CT_DCCP_IGNORE,
- CT_DCCP_INVALID,
- __CT_DCCP_MAX
-};
-#define CT_DCCP_MAX (__CT_DCCP_MAX - 1)
-
-enum ct_dccp_roles {
- CT_DCCP_ROLE_CLIENT,
- CT_DCCP_ROLE_SERVER,
- __CT_DCCP_ROLE_MAX
-};
-#define CT_DCCP_ROLE_MAX (__CT_DCCP_ROLE_MAX - 1)
-
-#include <linux/netfilter/nf_conntrack_tuple_common.h>
-
-struct nf_ct_dccp {
- u_int8_t role[IP_CT_DIR_MAX];
- u_int8_t state;
- u_int8_t last_pkt;
- u_int8_t last_dir;
- u_int64_t handshake_seq;
-};
-
-#endif /* _NF_CONNTRACK_DCCP_H */
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index f39f688d7285..77c778d84d4c 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -51,21 +51,11 @@ static inline struct net_device *xt_in(const struct xt_action_param *par)
return par->state->in;
}
-static inline const char *xt_inname(const struct xt_action_param *par)
-{
- return par->state->in->name;
-}
-
static inline struct net_device *xt_out(const struct xt_action_param *par)
{
return par->state->out;
}
-static inline const char *xt_outname(const struct xt_action_param *par)
-{
- return par->state->out->name;
-}
-
static inline unsigned int xt_hooknum(const struct xt_action_param *par)
{
return par->state->hook;
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 0477208ed9ff..b5ea9882eda8 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -15,10 +15,7 @@
#include <linux/refcount.h>
union inet_addr {
- __u32 all[4];
__be32 ip;
- __be32 ip6[4];
- struct in_addr in;
struct in6_addr in6;
};
@@ -42,6 +39,13 @@ struct netpoll {
struct work_struct refill_wq;
};
+#define np_info(np, fmt, ...) \
+ pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
+#define np_err(np, fmt, ...) \
+ pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
+#define np_notice(np, fmt, ...) \
+ pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
+
struct netpoll_info {
refcount_t refcnt;
@@ -65,11 +69,8 @@ static inline void netpoll_poll_enable(struct net_device *dev) { return; }
#endif
int netpoll_send_udp(struct netpoll *np, const char *msg, int len);
-void netpoll_print_options(struct netpoll *np);
-int netpoll_parse_options(struct netpoll *np, char *opt);
int __netpoll_setup(struct netpoll *np, struct net_device *ndev);
int netpoll_setup(struct netpoll *np);
-void __netpoll_cleanup(struct netpoll *np);
void __netpoll_free(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
void do_netpoll_cleanup(struct netpoll *np);
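
A minimal sketch of the np_*() logging helpers introduced above, as a netpoll client such as netconsole might use them. The function below is hypothetical; it only shows that the macros prefix each message with np->name.

#include <linux/netpoll.h>

/* Hypothetical error path in a netpoll client */
static void report_netpoll_failure(struct netpoll *np, int err)
{
	np_err(np, "setup failed: %d\n", err);
	np_info(np, "will retry once the interface is up again\n");
}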
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index e78fa535f61d..cf3c6ab408aa 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -103,10 +103,12 @@ void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs);
extern void hardlockup_detector_perf_stop(void);
extern void hardlockup_detector_perf_restart(void);
extern void hardlockup_config_perf_event(const char *str);
+extern void hardlockup_detector_perf_adjust_period(u64 period);
#else
static inline void hardlockup_detector_perf_stop(void) { }
static inline void hardlockup_detector_perf_restart(void) { }
static inline void hardlockup_config_perf_event(const char *str) { }
+static inline void hardlockup_detector_perf_adjust_period(u64 period) { }
#endif
void watchdog_hardlockup_stop(void);
diff --git a/include/linux/node.h b/include/linux/node.h
index 2b7517892230..2c7529335b21 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -111,43 +111,64 @@ struct memory_block;
extern struct node *node_devices[];
#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_NUMA)
-void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
- unsigned long end_pfn,
- enum meminit_context context);
+void register_memory_blocks_under_node_hotplug(int nid, unsigned long start_pfn,
+ unsigned long end_pfn);
#else
-static inline void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
- unsigned long end_pfn,
- enum meminit_context context)
+static inline void register_memory_blocks_under_node_hotplug(int nid,
+ unsigned long start_pfn,
+ unsigned long end_pfn)
+{
+}
+static inline void register_memory_blocks_under_nodes(void)
{
}
#endif
extern void unregister_node(struct node *node);
-#ifdef CONFIG_NUMA
-extern void node_dev_init(void);
-/* Core of the node registration - only memory hotplug should use this */
-extern int __register_one_node(int nid);
-
-/* Registers an online node */
-static inline int register_one_node(int nid)
-{
- int error = 0;
- if (node_online(nid)) {
- struct pglist_data *pgdat = NODE_DATA(nid);
- unsigned long start_pfn = pgdat->node_start_pfn;
- unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
+struct node_notify {
+ int nid;
+};
- error = __register_one_node(nid);
- if (error)
- return error;
- register_memory_blocks_under_node(nid, start_pfn, end_pfn,
- MEMINIT_EARLY);
- }
+#define NODE_ADDING_FIRST_MEMORY (1<<0)
+#define NODE_ADDED_FIRST_MEMORY (1<<1)
+#define NODE_CANCEL_ADDING_FIRST_MEMORY (1<<2)
+#define NODE_REMOVING_LAST_MEMORY (1<<3)
+#define NODE_REMOVED_LAST_MEMORY (1<<4)
+#define NODE_CANCEL_REMOVING_LAST_MEMORY (1<<5)
- return error;
+#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_NUMA)
+extern int register_node_notifier(struct notifier_block *nb);
+extern void unregister_node_notifier(struct notifier_block *nb);
+extern int node_notify(unsigned long val, void *v);
+
+#define hotplug_node_notifier(fn, pri) ({ \
+ static __meminitdata struct notifier_block fn##_node_nb =\
+ { .notifier_call = fn, .priority = pri };\
+ register_node_notifier(&fn##_node_nb); \
+})
+#else
+static inline int register_node_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+static inline void unregister_node_notifier(struct notifier_block *nb)
+{
+}
+static inline int node_notify(unsigned long val, void *v)
+{
+ return 0;
+}
+static inline int hotplug_node_notifier(notifier_fn_t fn, int pri)
+{
+ return 0;
}
+#endif
+#ifdef CONFIG_NUMA
+extern void node_dev_init(void);
+/* Core of the node registration - only memory hotplug should use this */
+extern int register_one_node(int nid);
extern void unregister_one_node(int nid);
extern int register_cpu_under_node(unsigned int cpu, unsigned int nid);
extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
@@ -160,10 +181,6 @@ extern int register_memory_node_under_compute_node(unsigned int mem_nid,
static inline void node_dev_init(void)
{
}
-static inline int __register_one_node(int nid)
-{
- return 0;
-}
static inline int register_one_node(int nid)
{
return 0;
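
A hedged sketch of the node notifier API defined above. my_node_callback() and its messages are assumptions for illustration; the callback follows the usual notifier_call convention, with the payload being the struct node_notify carrying the node id, and hotplug_node_notifier() wrapping the notifier_block registration.

#include <linux/init.h>
#include <linux/node.h>
#include <linux/notifier.h>
#include <linux/printk.h>

/* Hypothetical subscriber reacting to nodes gaining/losing all their memory */
static int my_node_callback(struct notifier_block *nb, unsigned long action,
			    void *data)
{
	struct node_notify *nn = data;

	switch (action) {
	case NODE_ADDED_FIRST_MEMORY:
		pr_info("node %d now has memory\n", nn->nid);
		break;
	case NODE_REMOVED_LAST_MEMORY:
		pr_info("node %d has no memory left\n", nn->nid);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int __init my_node_watch_init(void)
{
	/* registers a static notifier_block wrapping my_node_callback */
	return hotplug_node_notifier(my_node_callback, 0);
}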
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index f08ae71585fa..7ad1f5c7407e 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -492,21 +492,9 @@ static __always_inline int num_node_state(enum node_states state)
static __always_inline int node_random(const nodemask_t *maskp)
{
#if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1)
- int w, bit;
-
- w = nodes_weight(*maskp);
- switch (w) {
- case 0:
- bit = NUMA_NO_NODE;
- break;
- case 1:
- bit = first_node(*maskp);
- break;
- default:
- bit = find_nth_bit(maskp->bits, MAX_NUMNODES, get_random_u32_below(w));
- break;
- }
- return bit;
+ int node = find_random_bit(maskp->bits, MAX_NUMNODES);
+
+ return node < MAX_NUMNODES ? node : NUMA_NO_NODE;
#else
return 0;
#endif
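
A small, hedged example of the simplified node_random() above; choose_random_memory_node() is a made-up wrapper showing the NUMA_NO_NODE fallback.

#include <linux/nodemask.h>
#include <linux/topology.h>

/* Hypothetical: pick a random node that currently has memory,
 * falling back to the local node when the mask is empty. */
static int choose_random_memory_node(void)
{
	int nid = node_random(&node_states[N_MEMORY]);

	return nid == NUMA_NO_NODE ? numa_node_id() : nid;
}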
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index 6337ad4e5fe8..a480063c9cb1 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -54,6 +54,7 @@ extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
u32 id,
u32 bus_token);
extern void of_msi_configure(struct device *dev, const struct device_node *np);
+extern u32 of_msi_xlate(struct device *dev, struct device_node **msi_np, u32 id_in);
u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in);
#else
static inline void of_irq_init(const struct of_device_id *matches)
@@ -100,6 +101,10 @@ static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev
static inline void of_msi_configure(struct device *dev, struct device_node *np)
{
}
+static inline u32 of_msi_xlate(struct device *dev, struct device_node **msi_np, u32 id_in)
+{
+ return id_in;
+}
static inline u32 of_msi_map_id(struct device *dev,
struct device_node *msi_np, u32 id_in)
{
diff --git a/include/linux/packing.h b/include/linux/packing.h
index 0589d70bbe04..20ae4d452c7b 100644
--- a/include/linux/packing.h
+++ b/include/linux/packing.h
@@ -5,8 +5,12 @@
#ifndef _LINUX_PACKING_H
#define _LINUX_PACKING_H
-#include <linux/types.h>
+#include <linux/array_size.h>
#include <linux/bitops.h>
+#include <linux/build_bug.h>
+#include <linux/minmax.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
#define GEN_PACKED_FIELD_STRUCT(__type) \
struct packed_field_ ## __type { \
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 0146daf34430..765f2778e264 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -90,8 +90,6 @@ struct padata_cpumask {
* @processed: Number of already processed objects.
* @cpu: Next CPU to be processed.
* @cpumask: The cpumasks in use for parallel and serial workers.
- * @reorder_work: work struct for reordering.
- * @lock: Reorder lock.
*/
struct parallel_data {
struct padata_shell *ps;
@@ -102,8 +100,6 @@ struct parallel_data {
unsigned int processed;
int cpu;
struct padata_cpumask cpumask;
- struct work_struct reorder_work;
- spinlock_t ____cacheline_aligned lock;
};
/**
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 4fe5ee67535b..8e4d6eda8a8d 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -167,8 +167,12 @@ enum pageflags {
/* Remapped by swiotlb-xen. */
PG_xen_remapped = PG_owner_priv_1,
- /* non-lru isolated movable page */
- PG_isolated = PG_reclaim,
+#ifdef CONFIG_MIGRATION
+ /* movable_ops page that is isolated for migration */
+ PG_movable_ops_isolated = PG_reclaim,
+ /* this is a movable_ops page (for selected typed pages only) */
+ PG_movable_ops = PG_uptodate,
+#endif
/* Only valid for buddy pages. Used to track pages that are reported */
PG_reported = PG_uptodate,
@@ -691,15 +695,12 @@ PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
/*
* On an anonymous folio mapped into a user virtual memory area,
* folio->mapping points to its anon_vma, not to a struct address_space;
- * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
+ * with the FOLIO_MAPPING_ANON bit set to distinguish it. See rmap.h.
*
- * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
- * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
+ * On an anonymous folio in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
+ * the FOLIO_MAPPING_ANON_KSM bit may be set along with the FOLIO_MAPPING_ANON
* bit; and then folio->mapping points, not to an anon_vma, but to a private
- * structure which KSM associates with that merged page. See ksm.h.
- *
- * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
- * page and then folio->mapping points to a struct movable_operations.
+ * structure which KSM associates with that merged folio. See ksm.h.
*
* Please note that, confusingly, "folio_mapping" refers to the inode
* address_space which maps the folio from disk; whereas "folio_mapped"
@@ -712,50 +713,27 @@ PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
* false before calling the following functions (e.g., folio_test_anon).
* See mm/slab.h.
*/
-#define PAGE_MAPPING_ANON 0x1
-#define PAGE_MAPPING_MOVABLE 0x2
-#define PAGE_MAPPING_KSM (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
-#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
-
-static __always_inline bool folio_mapping_flags(const struct folio *folio)
-{
- return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0;
-}
-
-static __always_inline bool PageMappingFlags(const struct page *page)
-{
- return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
-}
+#define FOLIO_MAPPING_ANON 0x1
+#define FOLIO_MAPPING_ANON_KSM 0x2
+#define FOLIO_MAPPING_KSM (FOLIO_MAPPING_ANON | FOLIO_MAPPING_ANON_KSM)
+#define FOLIO_MAPPING_FLAGS (FOLIO_MAPPING_ANON | FOLIO_MAPPING_ANON_KSM)
static __always_inline bool folio_test_anon(const struct folio *folio)
{
- return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
+ return ((unsigned long)folio->mapping & FOLIO_MAPPING_ANON) != 0;
}
static __always_inline bool PageAnonNotKsm(const struct page *page)
{
unsigned long flags = (unsigned long)page_folio(page)->mapping;
- return (flags & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON;
+ return (flags & FOLIO_MAPPING_FLAGS) == FOLIO_MAPPING_ANON;
}
static __always_inline bool PageAnon(const struct page *page)
{
return folio_test_anon(page_folio(page));
}
-
-static __always_inline bool __folio_test_movable(const struct folio *folio)
-{
- return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
- PAGE_MAPPING_MOVABLE;
-}
-
-static __always_inline bool __PageMovable(const struct page *page)
-{
- return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
- PAGE_MAPPING_MOVABLE;
-}
-
#ifdef CONFIG_KSM
/*
* A KSM page is one of those write-protected "shared pages" or "merged pages"
@@ -765,8 +743,8 @@ static __always_inline bool __PageMovable(const struct page *page)
*/
static __always_inline bool folio_test_ksm(const struct folio *folio)
{
- return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
- PAGE_MAPPING_KSM;
+ return ((unsigned long)folio->mapping & FOLIO_MAPPING_FLAGS) ==
+ FOLIO_MAPPING_KSM;
}
#else
FOLIO_TEST_FLAG_FALSE(ksm)
@@ -1137,7 +1115,53 @@ static inline bool folio_contain_hwpoisoned_page(struct folio *folio)
bool is_free_buddy_page(const struct page *page);
-PAGEFLAG(Isolated, isolated, PF_ANY);
+#ifdef CONFIG_MIGRATION
+/*
+ * This page is migratable through movable_ops (for selected typed pages
+ * only).
+ *
+ * Page migration of such pages might fail, for example, if the page is
+ * already isolated by somebody else, or if the page is about to get freed.
+ *
+ * While a subsystem might set selected typed pages that support page migration
+ * as being movable through movable_ops, it must never clear this flag.
+ *
+ * This flag is only cleared when the page is freed back to the buddy.
+ *
+ * Only selected page types support this flag (see page_movable_ops()) and
+ * the flag might be used in other contexts for other pages. Always use
+ * page_has_movable_ops() instead.
+ */
+TESTPAGEFLAG(MovableOps, movable_ops, PF_NO_TAIL);
+SETPAGEFLAG(MovableOps, movable_ops, PF_NO_TAIL);
+/*
+ * A movable_ops page has this flag set while it is isolated for migration.
+ * This flag primarily protects against concurrent migration attempts.
+ *
+ * Once migration ended (success or failure), the flag is cleared. The
+ * flag is managed by the migration core.
+ */
+PAGEFLAG(MovableOpsIsolated, movable_ops_isolated, PF_NO_TAIL);
+#else /* !CONFIG_MIGRATION */
+TESTPAGEFLAG_FALSE(MovableOps, movable_ops);
+SETPAGEFLAG_NOOP(MovableOps, movable_ops);
+PAGEFLAG_FALSE(MovableOpsIsolated, movable_ops_isolated);
+#endif /* CONFIG_MIGRATION */
+
+/**
+ * page_has_movable_ops - test for a movable_ops page
+ * @page: The page to test.
+ *
+ * Test whether this is a movable_ops page. Such pages will stay that
+ * way until freed.
+ *
+ * Returns true if this is a movable_ops page, otherwise false.
+ */
+static inline bool page_has_movable_ops(const struct page *page)
+{
+ return PageMovableOps(page) &&
+ (PageOffline(page) || PageZsmalloc(page));
+}
static __always_inline int PageAnonExclusive(const struct page *page)
{
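
A hedged sketch using the movable_ops helpers introduced above. can_try_to_migrate() is hypothetical, and a real caller would need the page to be stabilized (for example by holding a reference), since both checks are racy on their own.

#include <linux/page-flags.h>

/* Hypothetical pre-check in a migration path */
static bool can_try_to_migrate(const struct page *page)
{
	/* only movable_ops pages are handled by this path */
	if (!page_has_movable_ops(page))
		return false;

	/* skip pages someone else already isolated for migration */
	if (PageMovableOpsIsolated(page))
		return false;

	return true;
}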
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 898bb788243b..3e2f960e166c 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -11,6 +11,12 @@ static inline bool is_migrate_isolate(int migratetype)
{
return migratetype == MIGRATE_ISOLATE;
}
+#define get_pageblock_isolate(page) \
+ get_pfnblock_bit(page, page_to_pfn(page), PB_migrate_isolate)
+#define clear_pageblock_isolate(page) \
+ clear_pfnblock_bit(page, page_to_pfn(page), PB_migrate_isolate)
+#define set_pageblock_isolate(page) \
+ set_pfnblock_bit(page, page_to_pfn(page), PB_migrate_isolate)
#else
static inline bool is_migrate_isolate_page(struct page *page)
{
@@ -20,22 +26,45 @@ static inline bool is_migrate_isolate(int migratetype)
{
return false;
}
+static inline bool get_pageblock_isolate(struct page *page)
+{
+ return false;
+}
+static inline void clear_pageblock_isolate(struct page *page)
+{
+}
+static inline void set_pageblock_isolate(struct page *page)
+{
+}
#endif
-#define MEMORY_OFFLINE 0x1
-#define REPORT_FAILURE 0x2
+/*
+ * Pageblock isolation modes:
+ * PB_ISOLATE_MODE_MEM_OFFLINE - isolate to offline (!allocate) memory
+ * e.g., skip over PageHWPoison() pages and
+ * PageOffline() pages. Unmovable pages will be
+ * reported in this mode.
+ * PB_ISOLATE_MODE_CMA_ALLOC - isolate for CMA allocations
+ * PB_ISOLATE_MODE_OTHER - isolate for other purposes
+ */
+enum pb_isolate_mode {
+ PB_ISOLATE_MODE_MEM_OFFLINE,
+ PB_ISOLATE_MODE_CMA_ALLOC,
+ PB_ISOLATE_MODE_OTHER,
+};
-void set_pageblock_migratetype(struct page *page, int migratetype);
+void __meminit init_pageblock_migratetype(struct page *page,
+ enum migratetype migratetype,
+ bool isolate);
-bool move_freepages_block_isolate(struct zone *zone, struct page *page,
- int migratetype);
+bool pageblock_isolate_and_move_free_pages(struct zone *zone, struct page *page);
+bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page);
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- int migratetype, int flags);
+ enum pb_isolate_mode mode);
-void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- int migratetype);
+void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
- int isol_flags);
+ enum pb_isolate_mode mode);
#endif
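
A hedged sketch of how the reworked isolation API above could be driven by an offlining-style caller; try_isolate_range() is made up and the migration step in the middle is elided.

#include <linux/page-isolation.h>

/* Hypothetical: fence off a PFN range from the allocator, do work, undo */
static int try_isolate_range(unsigned long start_pfn, unsigned long end_pfn)
{
	int ret;

	/* mark the pageblocks isolated so new allocations skip them */
	ret = start_isolate_page_range(start_pfn, end_pfn,
				       PB_ISOLATE_MODE_MEM_OFFLINE);
	if (ret)
		return ret;

	/* ... migrate or discard the contents of the range here ... */

	/* restore the original migratetypes */
	undo_isolate_page_range(start_pfn, end_pfn);
	return 0;
}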
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index debdc25f08b9..3328357f6dba 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -14,7 +14,7 @@ extern void __set_page_owner(struct page *page,
extern void __split_page_owner(struct page *page, int old_order,
int new_order);
extern void __folio_copy_owner(struct folio *newfolio, struct folio *old);
-extern void __set_page_owner_migrate_reason(struct page *page, int reason);
+extern void __folio_set_owner_migrate_reason(struct folio *folio, int reason);
extern void __dump_page_owner(const struct page *page);
extern void pagetypeinfo_showmixedcount_print(struct seq_file *m,
pg_data_t *pgdat, struct zone *zone);
@@ -43,10 +43,10 @@ static inline void folio_copy_owner(struct folio *newfolio, struct folio *old)
if (static_branch_unlikely(&page_owner_inited))
__folio_copy_owner(newfolio, old);
}
-static inline void set_page_owner_migrate_reason(struct page *page, int reason)
+static inline void folio_set_owner_migrate_reason(struct folio *folio, int reason)
{
if (static_branch_unlikely(&page_owner_inited))
- __set_page_owner_migrate_reason(page, reason);
+ __folio_set_owner_migrate_reason(folio, reason);
}
static inline void dump_page_owner(const struct page *page)
{
@@ -68,7 +68,7 @@ static inline void split_page_owner(struct page *page, int old_order,
static inline void folio_copy_owner(struct folio *newfolio, struct folio *folio)
{
}
-static inline void set_page_owner_migrate_reason(struct page *page, int reason)
+static inline void folio_set_owner_migrate_reason(struct folio *folio, int reason)
{
}
static inline void dump_page_owner(const struct page *page)
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index e73a4292ef02..6a44be0f39f4 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -19,15 +19,33 @@ enum pageblock_bits {
PB_migrate,
PB_migrate_end = PB_migrate + PB_migratetype_bits - 1,
/* 3 bits required for migrate types */
- PB_migrate_skip,/* If set the block is skipped by compaction */
+ PB_compact_skip,/* If set the block is skipped by compaction */
+#ifdef CONFIG_MEMORY_ISOLATION
+ /*
+ * Pageblock isolation is represented with a separate bit, so that
+ * the migratetype of a block is not overwritten by isolation.
+ */
+ PB_migrate_isolate, /* If set the block is isolated */
+#endif
/*
* Assume the bits will always align on a word. If this assumption
* changes then get/set pageblock needs updating.
*/
- NR_PAGEBLOCK_BITS
+ __NR_PAGEBLOCK_BITS
};
+#define NR_PAGEBLOCK_BITS (roundup_pow_of_two(__NR_PAGEBLOCK_BITS))
+
+#define MIGRATETYPE_MASK ((1UL << (PB_migrate_end + 1)) - 1)
+
+#ifdef CONFIG_MEMORY_ISOLATION
+#define MIGRATETYPE_AND_ISO_MASK \
+ (((1UL << (PB_migrate_end + 1)) - 1) | BIT(PB_migrate_isolate))
+#else
+#define MIGRATETYPE_AND_ISO_MASK MIGRATETYPE_MASK
+#endif
+
#if defined(CONFIG_HUGETLB_PAGE)
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
@@ -41,18 +59,18 @@ extern unsigned int pageblock_order;
* Huge pages are a constant size, but don't exceed the maximum allocation
* granularity.
*/
-#define pageblock_order MIN_T(unsigned int, HUGETLB_PAGE_ORDER, PAGE_BLOCK_ORDER)
+#define pageblock_order MIN_T(unsigned int, HUGETLB_PAGE_ORDER, PAGE_BLOCK_MAX_ORDER)
#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
#elif defined(CONFIG_TRANSPARENT_HUGEPAGE)
-#define pageblock_order MIN_T(unsigned int, HPAGE_PMD_ORDER, PAGE_BLOCK_ORDER)
+#define pageblock_order MIN_T(unsigned int, HPAGE_PMD_ORDER, PAGE_BLOCK_MAX_ORDER)
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
-/* If huge pages are not used, group by PAGE_BLOCK_ORDER */
-#define pageblock_order PAGE_BLOCK_ORDER
+/* If huge pages are not used, group by PAGE_BLOCK_MAX_ORDER */
+#define pageblock_order PAGE_BLOCK_MAX_ORDER
#endif /* CONFIG_HUGETLB_PAGE */
@@ -65,27 +83,23 @@ extern unsigned int pageblock_order;
/* Forward declaration */
struct page;
-unsigned long get_pfnblock_flags_mask(const struct page *page,
- unsigned long pfn,
- unsigned long mask);
-
-void set_pfnblock_flags_mask(struct page *page,
- unsigned long flags,
- unsigned long pfn,
- unsigned long mask);
+enum migratetype get_pfnblock_migratetype(const struct page *page,
+ unsigned long pfn);
+bool get_pfnblock_bit(const struct page *page, unsigned long pfn,
+ enum pageblock_bits pb_bit);
+void set_pfnblock_bit(const struct page *page, unsigned long pfn,
+ enum pageblock_bits pb_bit);
+void clear_pfnblock_bit(const struct page *page, unsigned long pfn,
+ enum pageblock_bits pb_bit);
/* Declarations for getting and setting flags. See mm/page_alloc.c */
#ifdef CONFIG_COMPACTION
#define get_pageblock_skip(page) \
- get_pfnblock_flags_mask(page, page_to_pfn(page), \
- (1 << (PB_migrate_skip)))
+ get_pfnblock_bit(page, page_to_pfn(page), PB_compact_skip)
#define clear_pageblock_skip(page) \
- set_pfnblock_flags_mask(page, 0, page_to_pfn(page), \
- (1 << PB_migrate_skip))
+ clear_pfnblock_bit(page, page_to_pfn(page), PB_compact_skip)
#define set_pageblock_skip(page) \
- set_pfnblock_flags_mask(page, (1 << PB_migrate_skip), \
- page_to_pfn(page), \
- (1 << PB_migrate_skip))
+ set_pfnblock_bit(page, page_to_pfn(page), PB_compact_skip)
#else
static inline bool get_pageblock_skip(struct page *page)
{
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index ce2bcdcadb73..12a12dae727d 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -502,7 +502,7 @@ static inline pgoff_t mapping_align_index(struct address_space *mapping,
static inline bool mapping_large_folio_support(struct address_space *mapping)
{
/* AS_FOLIO_ORDER is only reasonable for pagecache folios */
- VM_WARN_ONCE((unsigned long)mapping & PAGE_MAPPING_ANON,
+ VM_WARN_ONCE((unsigned long)mapping & FOLIO_MAPPING_ANON,
"Anonymous mapping always supports large folio");
return mapping_max_folio_order(mapping) > 0;
@@ -905,7 +905,8 @@ static inline struct page *find_or_create_page(struct address_space *mapping,
* @mapping: target address_space
* @index: the page index
*
- * Same as grab_cache_page(), but do not wait if the page is unavailable.
+ * Returns the locked page at the given index in the given cache, creating it
+ * if needed, but does not wait if the page is already locked or if memory
+ * would need to be reclaimed.
* This is intended for speculative data generators, where the data can
* be regenerated if the page couldn't be grabbed. This routine should
* be safe to call while holding the lock for another page.
@@ -969,15 +970,6 @@ unsigned filemap_get_folios_contig(struct address_space *mapping,
unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch);
-/*
- * Returns locked page at given index in given cache, creating it if needed.
- */
-static inline struct page *grab_cache_page(struct address_space *mapping,
- pgoff_t index)
-{
- return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
-}
-
struct folio *read_cache_folio(struct address_space *, pgoff_t index,
filler_t *filler, struct file *file);
struct folio *mapping_read_folio_gfp(struct address_space *, pgoff_t index,
diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h
index 9700a29f8afb..682472c15495 100644
--- a/include/linux/pagewalk.h
+++ b/include/linux/pagewalk.h
@@ -14,6 +14,8 @@ enum page_walk_lock {
PGWALK_WRLOCK = 1,
/* vma is expected to be already write-locked during the walk */
PGWALK_WRLOCK_VERIFY = 2,
+ /* vma is expected to be already read-locked during the walk */
+ PGWALK_VMA_RDLOCK_VERIFY = 3,
};
/**
@@ -129,10 +131,9 @@ struct mm_walk {
int walk_page_range(struct mm_struct *mm, unsigned long start,
unsigned long end, const struct mm_walk_ops *ops,
void *private);
-int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
- unsigned long end, const struct mm_walk_ops *ops,
- pgd_t *pgd,
- void *private);
+int walk_kernel_page_table_range(unsigned long start,
+ unsigned long end, const struct mm_walk_ops *ops,
+ pgd_t *pgd, void *private);
int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
unsigned long end, const struct mm_walk_ops *ops,
void *private);
diff --git a/include/linux/panic.h b/include/linux/panic.h
index 4adc65766935..7be742628c25 100644
--- a/include/linux/panic.h
+++ b/include/linux/panic.h
@@ -3,6 +3,7 @@
#define _LINUX_PANIC_H
#include <linux/compiler_attributes.h>
+#include <linux/stdarg.h>
#include <linux/types.h>
struct pt_regs;
@@ -10,6 +11,8 @@ struct pt_regs;
extern long (*panic_blink)(int state);
__printf(1, 2)
void panic(const char *fmt, ...) __noreturn __cold;
+__printf(1, 0)
+void vpanic(const char *fmt, va_list args) __noreturn __cold;
void nmi_panic(struct pt_regs *regs, const char *msg);
void check_panic_on_warn(const char *origin);
extern void oops_enter(void);
@@ -25,8 +28,6 @@ extern int panic_on_warn;
extern unsigned long panic_on_taint;
extern bool panic_on_taint_nousertaint;
-extern int sysctl_panic_on_rcu_stall;
-extern int sysctl_max_rcu_stall_to_panic;
extern int sysctl_panic_on_stackoverflow;
extern bool crash_kexec_post_notifiers;
diff --git a/include/linux/pci-tph.h b/include/linux/pci-tph.h
index c3e806c13d64..9e4e331b1603 100644
--- a/include/linux/pci-tph.h
+++ b/include/linux/pci-tph.h
@@ -28,6 +28,7 @@ int pcie_tph_get_cpu_st(struct pci_dev *dev,
unsigned int cpu_uid, u16 *tag);
void pcie_disable_tph(struct pci_dev *pdev);
int pcie_enable_tph(struct pci_dev *pdev, int mode);
+u16 pcie_tph_get_st_table_size(struct pci_dev *pdev);
#else
static inline int pcie_tph_set_st_entry(struct pci_dev *pdev,
unsigned int index, u16 tag)
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index c16cdeaa505e..12d90360f6db 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -63,14 +63,15 @@
* 1. The symbol must be globally unique, even the static ones.
* 2. Static percpu variables cannot be defined inside a function.
*
- * Archs which need weak percpu definitions should define
- * ARCH_NEEDS_WEAK_PER_CPU in asm/percpu.h when necessary.
+ * Archs which need weak percpu definitions should set
+ * CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU when necessary.
*
* To ensure that the generic code observes the above two
* restrictions, if CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set weak
* definition is used for all cases.
*/
-#if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
+#if (defined(CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU) && defined(MODULE)) || \
+ defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
/*
* __pcpu_scope_* dummy variable is used to enforce scope. It
* receives the static modifier when it's used in front of
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 6dc5e0cd76ca..93c9a26492fc 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -70,6 +70,11 @@ struct pmu_hw_events {
struct arm_pmu *percpu_pmu;
int irq;
+
+ struct perf_branch_stack *branch_stack;
+
+ /* Active events requesting branch records */
+ unsigned int branch_users;
};
enum armpmu_attr_groups {
@@ -115,6 +120,7 @@ struct arm_pmu {
/* PMUv3 only */
int pmuver;
u64 reg_pmmir;
+ u64 reg_brbidr;
#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
#define ARMV8_PMUV3_EXT_COMMON_EVENT_BASE 0x4000
@@ -126,6 +132,8 @@ struct arm_pmu {
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
+DECLARE_PER_CPU(struct arm_pmu *, cpu_armpmu);
+
u64 armpmu_event_update(struct perf_event *event);
int armpmu_event_set_period(struct perf_event *event);
diff --git a/include/linux/pfn.h b/include/linux/pfn.h
index 14bc053c53d8..b90ca0b6c331 100644
--- a/include/linux/pfn.h
+++ b/include/linux/pfn.h
@@ -4,15 +4,6 @@
#ifndef __ASSEMBLY__
#include <linux/types.h>
-
-/*
- * pfn_t: encapsulates a page-frame number that is optionally backed
- * by memmap (struct page). Whether a pfn_t has a 'struct page'
- * backing is indicated by flags in the high bits of the value.
- */
-typedef struct {
- u64 val;
-} pfn_t;
#endif
#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h
deleted file mode 100644
index 2d9148221e9a..000000000000
--- a/include/linux/pfn_t.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_PFN_T_H_
-#define _LINUX_PFN_T_H_
-#include <linux/mm.h>
-
-/*
- * PFN_FLAGS_MASK - mask of all the possible valid pfn_t flags
- * PFN_SG_CHAIN - pfn is a pointer to the next scatterlist entry
- * PFN_SG_LAST - pfn references a page and is the last scatterlist entry
- * PFN_DEV - pfn is not covered by system memmap by default
- * PFN_MAP - pfn has a dynamic page mapping established by a device driver
- * PFN_SPECIAL - for CONFIG_FS_DAX_LIMITED builds to allow XIP, but not
- * get_user_pages
- */
-#define PFN_FLAGS_MASK (((u64) (~PAGE_MASK)) << (BITS_PER_LONG_LONG - PAGE_SHIFT))
-#define PFN_SG_CHAIN (1ULL << (BITS_PER_LONG_LONG - 1))
-#define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2))
-#define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3))
-#define PFN_MAP (1ULL << (BITS_PER_LONG_LONG - 4))
-#define PFN_SPECIAL (1ULL << (BITS_PER_LONG_LONG - 5))
-
-#define PFN_FLAGS_TRACE \
- { PFN_SPECIAL, "SPECIAL" }, \
- { PFN_SG_CHAIN, "SG_CHAIN" }, \
- { PFN_SG_LAST, "SG_LAST" }, \
- { PFN_DEV, "DEV" }, \
- { PFN_MAP, "MAP" }
-
-static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, u64 flags)
-{
- pfn_t pfn_t = { .val = pfn | (flags & PFN_FLAGS_MASK), };
-
- return pfn_t;
-}
-
-/* a default pfn to pfn_t conversion assumes that @pfn is pfn_valid() */
-static inline pfn_t pfn_to_pfn_t(unsigned long pfn)
-{
- return __pfn_to_pfn_t(pfn, 0);
-}
-
-static inline pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags)
-{
- return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags);
-}
-
-static inline bool pfn_t_has_page(pfn_t pfn)
-{
- return (pfn.val & PFN_MAP) == PFN_MAP || (pfn.val & PFN_DEV) == 0;
-}
-
-static inline unsigned long pfn_t_to_pfn(pfn_t pfn)
-{
- return pfn.val & ~PFN_FLAGS_MASK;
-}
-
-static inline struct page *pfn_t_to_page(pfn_t pfn)
-{
- if (pfn_t_has_page(pfn))
- return pfn_to_page(pfn_t_to_pfn(pfn));
- return NULL;
-}
-
-static inline phys_addr_t pfn_t_to_phys(pfn_t pfn)
-{
- return PFN_PHYS(pfn_t_to_pfn(pfn));
-}
-
-static inline pfn_t page_to_pfn_t(struct page *page)
-{
- return pfn_to_pfn_t(page_to_pfn(page));
-}
-
-static inline int pfn_t_valid(pfn_t pfn)
-{
- return pfn_valid(pfn_t_to_pfn(pfn));
-}
-
-#ifdef CONFIG_MMU
-static inline pte_t pfn_t_pte(pfn_t pfn, pgprot_t pgprot)
-{
- return pfn_pte(pfn_t_to_pfn(pfn), pgprot);
-}
-#endif
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline pmd_t pfn_t_pmd(pfn_t pfn, pgprot_t pgprot)
-{
- return pfn_pmd(pfn_t_to_pfn(pfn), pgprot);
-}
-
-#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-static inline pud_t pfn_t_pud(pfn_t pfn, pgprot_t pgprot)
-{
- return pfn_pud(pfn_t_to_pfn(pfn), pgprot);
-}
-#endif
-#endif
-
-#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
-static inline bool pfn_t_devmap(pfn_t pfn)
-{
- const u64 flags = PFN_DEV|PFN_MAP;
-
- return (pfn.val & flags) == flags;
-}
-#else
-static inline bool pfn_t_devmap(pfn_t pfn)
-{
- return false;
-}
-pte_t pte_mkdevmap(pte_t pte);
-pmd_t pmd_mkdevmap(pmd_t pmd);
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
- defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
-pud_t pud_mkdevmap(pud_t pud);
-#endif
-#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */
-
-#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
-static inline bool pfn_t_special(pfn_t pfn)
-{
- return (pfn.val & PFN_SPECIAL) == PFN_SPECIAL;
-}
-#else
-static inline bool pfn_t_special(pfn_t pfn)
-{
- return false;
-}
-#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
-#endif /* _LINUX_PFN_T_H_ */
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 0b6e1f781d86..e3b99920be05 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -456,6 +456,17 @@ static inline bool arch_has_hw_pte_young(void)
}
#endif
+#ifndef exec_folio_order
+/*
+ * Returns preferred minimum folio order for executable file-backed memory. Must
+ * be in range [0, PMD_ORDER). Default to order-0.
+ */
+static inline unsigned int exec_folio_order(void)
+{
+ return 0;
+}
+#endif
+
#ifndef arch_check_zapped_pte
static inline void arch_check_zapped_pte(struct vm_area_struct *vma,
pte_t pte)
@@ -1320,7 +1331,9 @@ static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
/*
* Commit an update to a pte, leaving any hardware-controlled bits in
- * the PTE unmodified.
+ * the PTE unmodified. The pte returned from ptep_modify_prot_start() may
+ * additionally have young and/or dirty bits set where previously they were not,
+ * so the updated pte may have these additional changes.
*/
static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
unsigned long addr,
@@ -1329,6 +1342,86 @@ static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
__ptep_modify_prot_commit(vma, addr, ptep, pte);
}
#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
+
+/**
+ * modify_prot_start_ptes - Start a pte protection read-modify-write transaction
+ * over a batch of ptes, which protects against asynchronous hardware
+ * modifications to the ptes. The intention is not to prevent the hardware from
+ * making pte updates, but to prevent any updates it may make from being lost.
+ * Please see the comment above ptep_modify_prot_start() for full description.
+ *
+ * @vma: The virtual memory area the pages are mapped into.
+ * @addr: Address the first page is mapped at.
+ * @ptep: Page table pointer for the first entry.
+ * @nr: Number of entries.
+ *
+ * May be overridden by the architecture; otherwise, implemented as a simple
+ * loop over ptep_modify_prot_start(), collecting the a/d bits from each pte
+ * in the batch.
+ *
+ * Note that PTE bits in the PTE batch besides the PFN can differ.
+ *
+ * Context: The caller holds the page table lock. The PTEs map consecutive
+ * pages that belong to the same folio. All other PTE bits must be identical for
+ * all PTEs in the batch except for young and dirty bits. The PTEs are all in
+ * the same PMD.
+ */
+#ifndef modify_prot_start_ptes
+static inline pte_t modify_prot_start_ptes(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep, unsigned int nr)
+{
+ pte_t pte, tmp_pte;
+
+ pte = ptep_modify_prot_start(vma, addr, ptep);
+ while (--nr) {
+ ptep++;
+ addr += PAGE_SIZE;
+ tmp_pte = ptep_modify_prot_start(vma, addr, ptep);
+ if (pte_dirty(tmp_pte))
+ pte = pte_mkdirty(pte);
+ if (pte_young(tmp_pte))
+ pte = pte_mkyoung(pte);
+ }
+ return pte;
+}
+#endif
+
+/**
+ * modify_prot_commit_ptes - Commit an update to a batch of ptes, leaving any
+ * hardware-controlled bits in the PTE unmodified.
+ *
+ * @vma: The virtual memory area the pages are mapped into.
+ * @addr: Address the first page is mapped at.
+ * @ptep: Page table pointer for the first entry.
+ * @old_pte: Old page table entry (for the first entry) which is now cleared.
+ * @pte: New page table entry to be set.
+ * @nr: Number of entries.
+ *
+ * May be overridden by the architecture; otherwise, implemented as a simple
+ * loop over ptep_modify_prot_commit().
+ *
+ * Context: The caller holds the page table lock. The PTEs are all in the same
+ * PMD. On exit, the set ptes in the batch map the same folio. The ptes set by
+ * ptep_modify_prot_start() may additionally have young and/or dirty bits set
+ * where previously they were not, so the updated ptes may have these
+ * additional changes.
+ */
+#ifndef modify_prot_commit_ptes
+static inline void modify_prot_commit_ptes(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep, pte_t old_pte, pte_t pte, unsigned int nr)
+{
+ int i;
+
+ for (i = 0; i < nr; ++i, ++ptep, addr += PAGE_SIZE) {
+ ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
+
+ /* Advance PFN only, set same prot */
+ old_pte = pte_next_pfn(old_pte);
+ pte = pte_next_pfn(pte);
+ }
+}
+#endif
+
#endif /* CONFIG_MMU */
/*
@@ -1632,21 +1725,6 @@ static inline int pud_write(pud_t pud)
}
#endif /* pud_write */
-#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
-static inline int pmd_devmap(pmd_t pmd)
-{
- return 0;
-}
-static inline int pud_devmap(pud_t pud)
-{
- return 0;
-}
-static inline int pgd_devmap(pgd_t pgd)
-{
- return 0;
-}
-#endif
-
#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
!defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline int pud_trans_huge(pud_t pud)
@@ -1661,7 +1739,7 @@ static inline int pud_trans_unstable(pud_t *pud)
defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
pud_t pudval = READ_ONCE(*pud);
- if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval))
+ if (pud_none(pudval) || pud_trans_huge(pudval))
return 1;
if (unlikely(pud_bad(pudval))) {
pud_clear_bad(pud);
@@ -1901,8 +1979,8 @@ typedef unsigned int pgtbl_mod_mask;
* - It should contain a huge PFN, which points to a huge page larger than
* PAGE_SIZE of the platform. The PFN format isn't important here.
*
- * - It should cover all kinds of huge mappings (e.g., pXd_trans_huge(),
- * pXd_devmap(), or hugetlb mappings).
+ * - It should cover all kinds of huge mappings (i.e. pXd_trans_huge()
+ * or hugetlb mappings).
*/
#ifndef pgd_leaf
#define pgd_leaf(x) false
@@ -2005,7 +2083,7 @@ typedef unsigned int pgtbl_mod_mask;
* x: (yes) yes
*/
#define DECLARE_VM_GET_PAGE_PROT \
-pgprot_t vm_get_page_prot(unsigned long vm_flags) \
+pgprot_t vm_get_page_prot(vm_flags_t vm_flags) \
{ \
return protection_map[vm_flags & \
(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]; \
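
A hedged sketch of the batched PTE protection helpers documented above. prot_ptes() is a made-up caller; the batch is assumed to map consecutive pages of one folio within a single PMD, and the page table lock is assumed to be held, as the kernel-doc requires.

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Hypothetical change_protection()-style step over a batch of nr PTEs */
static void prot_ptes(struct vm_area_struct *vma, unsigned long addr,
		      pte_t *ptep, unsigned int nr, pgprot_t newprot)
{
	pte_t old_pte, pte;

	/* start the read-modify-write transaction; young/dirty bits from
	 * every PTE in the batch are folded into the returned value */
	old_pte = modify_prot_start_ptes(vma, addr, ptep, nr);

	/* compute the new protections from the accumulated value */
	pte = pte_modify(old_pte, newprot);

	/* commit, advancing the PFN for each entry in the batch */
	modify_prot_commit_ptes(vma, addr, ptep, old_pte, pte, nr);
}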
diff --git a/include/linux/phy.h b/include/linux/phy.h
index e194dad1623d..4c2b8b6e7187 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -103,6 +103,10 @@ extern const int phy_basic_ports_array[3];
* @PHY_INTERFACE_MODE_QUSGMII: Quad Universal SGMII
* @PHY_INTERFACE_MODE_1000BASEKX: 1000Base-KX - with Clause 73 AN
* @PHY_INTERFACE_MODE_10G_QXGMII: 10G-QXGMII - 4 ports over 10G USXGMII
+ * @PHY_INTERFACE_MODE_50GBASER: 50GBase-R - with Clause 134 FEC
+ * @PHY_INTERFACE_MODE_LAUI: 50 Gigabit Attachment Unit Interface
+ * @PHY_INTERFACE_MODE_100GBASEP: 100GBase-P - with Clause 134 FEC
+ * @PHY_INTERFACE_MODE_MIILITE: MII-Lite - MII without RXER TXER CRS COL
* @PHY_INTERFACE_MODE_MAX: Book keeping
*
* Describes the interface between the MAC and PHY.
@@ -144,6 +148,10 @@ typedef enum {
PHY_INTERFACE_MODE_QUSGMII,
PHY_INTERFACE_MODE_1000BASEKX,
PHY_INTERFACE_MODE_10G_QXGMII,
+ PHY_INTERFACE_MODE_50GBASER,
+ PHY_INTERFACE_MODE_LAUI,
+ PHY_INTERFACE_MODE_100GBASEP,
+ PHY_INTERFACE_MODE_MIILITE,
PHY_INTERFACE_MODE_MAX,
} phy_interface_t;
@@ -260,6 +268,14 @@ static inline const char *phy_modes(phy_interface_t interface)
return "qusgmii";
case PHY_INTERFACE_MODE_10G_QXGMII:
return "10g-qxgmii";
+ case PHY_INTERFACE_MODE_50GBASER:
+ return "50gbase-r";
+ case PHY_INTERFACE_MODE_LAUI:
+ return "laui";
+ case PHY_INTERFACE_MODE_100GBASEP:
+ return "100gbase-p";
+ case PHY_INTERFACE_MODE_MIILITE:
+ return "mii-lite";
default:
return "unknown";
}
@@ -269,8 +285,10 @@ static inline const char *phy_modes(phy_interface_t interface)
* rgmii_clock - map link speed to the clock rate
* @speed: link speed value
*
- * Description: maps RGMII supported link speeds
- * into the clock rates.
+ * Description: maps RGMII supported link speeds into the clock rates.
+ * This can also be used for MII, GMII, and RMII interface modes as the
+ * clock rates are identical, but the caller must be aware that errors
+ * for unsupported clock rates will not be signalled.
*
* Returns: clock rate or negative errno
*/
@@ -395,8 +413,10 @@ struct mii_bus {
/** @shared_lock: protect access to the shared element */
struct mutex shared_lock;
+#if IS_ENABLED(CONFIG_PHY_PACKAGE)
/** @shared: shared state across different PHYs */
struct phy_package_shared *shared[PHY_MAX_ADDR];
+#endif
};
#define to_mii_bus(d) container_of(d, struct mii_bus, dev)
@@ -526,6 +546,7 @@ struct macsec_ops;
 * @mac_managed_pm: Set true if MAC driver takes care of suspending/resuming PHY
* @wol_enabled: Set to true if the PHY or the attached MAC have Wake-on-LAN
* enabled.
+ * @is_genphy_driven: PHY is driven by one of the generic PHY drivers
* @state: State of the PHY for management purposes
* @dev_flags: Device-specific flags used by the PHY driver.
*
@@ -629,6 +650,7 @@ struct phy_device {
unsigned is_on_sfp_module:1;
unsigned mac_managed_pm:1;
unsigned wol_enabled:1;
+ unsigned is_genphy_driven:1;
unsigned autoneg:1;
/* The most recently read link state */
@@ -702,9 +724,11 @@ struct phy_device {
/* For use by PHYs to maintain extra state */
void *priv;
+#if IS_ENABLED(CONFIG_PHY_PACKAGE)
/* shared data pointer */
/* For use by PHYs inside the same package that need a shared state. */
struct phy_package_shared *shared;
+#endif
/* Reporting cable test results */
struct sk_buff *skb;
@@ -1292,6 +1316,17 @@ static inline bool phy_is_started(struct phy_device *phydev)
}
/**
+ * phy_driver_is_genphy - Convenience function to check whether PHY is driven
+ * by one of the generic PHY drivers
+ * @phydev: The phy_device struct
+ * Return: true if PHY is driven by one of the genphy drivers
+ */
+static inline bool phy_driver_is_genphy(struct phy_device *phydev)
+{
+ return phydev->is_genphy_driven;
+}
+
+/**
* phy_disable_eee_mode - Don't advertise an EEE mode.
* @phydev: The phy_device struct
* @link_mode: The EEE mode to be disabled
@@ -1941,9 +1976,6 @@ int genphy_c45_ethtool_set_eee(struct phy_device *phydev,
struct ethtool_keee *data);
int genphy_c45_an_config_eee_aneg(struct phy_device *phydev);
-/* Generic C45 PHY driver */
-extern struct phy_driver genphy_c45_driver;
-
/* The gen10g_* functions are the old Clause 45 stub */
int gen10g_config_aneg(struct phy_device *phydev);
@@ -1997,8 +2029,8 @@ bool phy_validate_pause(struct phy_device *phydev,
struct ethtool_pauseparam *pp);
void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause);
-s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
- const int *delay_values, int size, bool is_rx);
+s32 phy_get_internal_delay(struct phy_device *phydev, const int *delay_values,
+ int size, bool is_rx);
int phy_get_tx_amplitude_gain(struct phy_device *phydev, struct device *dev,
enum ethtool_link_mode_bit_indices linkmode,
@@ -2096,7 +2128,4 @@ module_exit(phy_module_exit)
#define module_phy_driver(__phy_drivers) \
phy_module_driver(__phy_drivers, ARRAY_SIZE(__phy_drivers))
-bool phy_driver_is_genphy(struct phy_device *phydev);
-bool phy_driver_is_genphy_10g(struct phy_device *phydev);
-
#endif /* __PHY_H */
diff --git a/include/linux/platform_data/emc2305.h b/include/linux/platform_data/emc2305.h
index 54d672dd6f7d..76043a97f975 100644
--- a/include/linux/platform_data/emc2305.h
+++ b/include/linux/platform_data/emc2305.h
@@ -9,14 +9,20 @@
* struct emc2305_platform_data - EMC2305 driver platform data
* @max_state: maximum cooling state of the cooling device;
* @pwm_num: number of active channels;
+ * @pwm_output_mask: PWM output mask
+ * @pwm_polarity_mask: PWM polarity mask
* @pwm_separate: separate PWM settings for every channel;
* @pwm_min: array of minimum PWM per channel;
+ * @pwm_freq: array of PWM frequency per channel
*/
struct emc2305_platform_data {
u8 max_state;
u8 pwm_num;
+ u8 pwm_output_mask;
+ u8 pwm_polarity_mask;
bool pwm_separate;
u8 pwm_min[EMC2305_PWM_MAX];
+ u16 pwm_freq[EMC2305_PWM_MAX];
};
#endif
diff --git a/include/linux/platform_data/microchip-ksz.h b/include/linux/platform_data/microchip-ksz.h
index 0e0e8fe6975f..028781ad4059 100644
--- a/include/linux/platform_data/microchip-ksz.h
+++ b/include/linux/platform_data/microchip-ksz.h
@@ -23,6 +23,7 @@
#include <linux/platform_data/dsa.h>
enum ksz_chip_id {
+ KSZ8463_CHIP_ID = 0x8463,
KSZ8563_CHIP_ID = 0x8563,
KSZ8795_CHIP_ID = 0x8795,
KSZ8794_CHIP_ID = 0x8794,
diff --git a/include/linux/platform_data/video-pxafb.h b/include/linux/platform_data/video-pxafb.h
index 6333bac166a5..38c24c77ba43 100644
--- a/include/linux/platform_data/video-pxafb.h
+++ b/include/linux/platform_data/video-pxafb.h
@@ -150,7 +150,6 @@ struct pxafb_mach_info {
};
void pxa_set_fb_info(struct device *, struct pxafb_mach_info *);
-unsigned long pxafb_get_hsync_time(struct device *dev);
/* smartpanel related */
#define SMART_CMD_A0 (0x1 << 8)
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 0cca01b5607b..f21f806bfb38 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -232,7 +232,6 @@ struct power_supply;
/* Run-time specific power supply configuration */
struct power_supply_config {
- struct device_node *of_node;
struct fwnode_handle *fwnode;
/* Driver private data */
@@ -808,19 +807,10 @@ static inline void power_supply_put(struct power_supply *psy) {}
static inline struct power_supply *power_supply_get_by_name(const char *name)
{ return NULL; }
#endif
-#ifdef CONFIG_OF
-extern struct power_supply *power_supply_get_by_phandle(struct device_node *np,
- const char *property);
-extern struct power_supply *devm_power_supply_get_by_phandle(
+extern struct power_supply *power_supply_get_by_reference(struct fwnode_handle *fwnode,
+ const char *property);
+extern struct power_supply *devm_power_supply_get_by_reference(
struct device *dev, const char *property);
-#else /* !CONFIG_OF */
-static inline struct power_supply *
-power_supply_get_by_phandle(struct device_node *np, const char *property)
-{ return NULL; }
-static inline struct power_supply *
-devm_power_supply_get_by_phandle(struct device *dev, const char *property)
-{ return NULL; }
-#endif /* CONFIG_OF */
extern const enum power_supply_property power_supply_battery_info_properties[];
extern const size_t power_supply_battery_info_properties_size;
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index b0af8d4ef6e6..1fad1c8a4c76 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -369,8 +369,6 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
#endif
-#ifdef CONFIG_SMP
-
/*
* Migrate-Disable and why it is undesired.
*
@@ -429,13 +427,6 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
extern void migrate_disable(void);
extern void migrate_enable(void);
-#else
-
-static inline void migrate_disable(void) { }
-static inline void migrate_enable(void) { }
-
-#endif /* CONFIG_SMP */
-
/**
* preempt_disable_nested - Disable preemption inside a normally preempt disabled section
*
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 5b462029d03c..5d22b803f51e 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -154,6 +154,8 @@ int vprintk_emit(int facility, int level,
asmlinkage __printf(1, 0)
int vprintk(const char *fmt, va_list args);
+__printf(1, 0)
+int vprintk_deferred(const char *fmt, va_list args);
asmlinkage __printf(1, 2) __cold
int _printk(const char *fmt, ...);
@@ -214,6 +216,11 @@ int vprintk(const char *s, va_list args)
{
return 0;
}
+static inline __printf(1, 0)
+int vprintk_deferred(const char *fmt, va_list args)
+{
+ return 0;
+}
static inline __printf(1, 2) __cold
int _printk(const char *s, ...)
{
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index de1d24f19f76..f139377f4b31 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -27,6 +27,7 @@ enum {
PROC_ENTRY_proc_read_iter = 1U << 1,
PROC_ENTRY_proc_compat_ioctl = 1U << 2,
+ PROC_ENTRY_proc_lseek = 1U << 3,
PROC_ENTRY_FORCE_LOOKUP = 1U << 7,
};
diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h
index c773eeb92d04..4e5696cfade7 100644
--- a/include/linux/pse-pd/pse.h
+++ b/include/linux/pse-pd/pse.h
@@ -6,13 +6,18 @@
#define _LINUX_PSE_CONTROLLER_H
#include <linux/list.h>
+#include <linux/netlink.h>
+#include <linux/kfifo.h>
#include <uapi/linux/ethtool.h>
+#include <uapi/linux/ethtool_netlink_generated.h>
+#include <linux/regulator/driver.h>
/* Maximum current in uA according to IEEE 802.3-2022 Table 145-1 */
#define MAX_PI_CURRENT 1920000
/* Maximum power in mW according to IEEE 802.3-2022 Table 145-16 */
#define MAX_PI_PW 99900
+struct net_device;
struct phy_device;
struct pse_controller_dev;
struct netlink_ext_ack;
@@ -38,6 +43,19 @@ struct ethtool_c33_pse_pw_limit_range {
};
/**
+ * struct pse_irq_desc - notification sender description for IRQ based events.
+ *
+ * @name: the visible name for the IRQ
+ * @map_event: driver callback to map the IRQ status into the PSE devices that have events.
+ */
+struct pse_irq_desc {
+ const char *name;
+ int (*map_event)(int irq, struct pse_controller_dev *pcdev,
+ unsigned long *notifs,
+ unsigned long *notifs_mask);
+};
+
+/**
* struct pse_control_config - PSE control/channel configuration.
*
* @podl_admin_control: set PoDL PSE admin control as described in
@@ -98,6 +116,7 @@ struct pse_pw_limit_ranges {
/**
* struct ethtool_pse_control_status - PSE control/channel status.
*
+ * @pw_d_id: PSE power domain index.
* @podl_admin_state: operational state of the PoDL PSE
* functions. IEEE 802.3-2018 30.15.1.1.2 aPoDLPSEAdminState
* @podl_pw_status: power detection status of the PoDL PSE.
@@ -117,8 +136,12 @@ struct pse_pw_limit_ranges {
* is in charge of the memory allocation
* @c33_pw_limit_nb_ranges: number of supported power limit configuration
* ranges
+ * @prio_max: max priority allowed for the c33_prio variable value.
+ * @prio: priority of the PSE. Managed by the PSE core when the static budget
+ *	  evaluation strategy is used.
*/
struct ethtool_pse_control_status {
+ u32 pw_d_id;
enum ethtool_podl_pse_admin_state podl_admin_state;
enum ethtool_podl_pse_pw_d_status podl_pw_status;
enum ethtool_c33_pse_admin_state c33_admin_state;
@@ -129,12 +152,20 @@ struct ethtool_pse_control_status {
u32 c33_avail_pw_limit;
struct ethtool_c33_pse_pw_limit_range *c33_pw_limit_ranges;
u32 c33_pw_limit_nb_ranges;
+ u32 prio_max;
+ u32 prio;
};
/**
* struct pse_controller_ops - PSE controller driver callbacks
*
- * @setup_pi_matrix: setup PI matrix of the PSE controller
+ * @setup_pi_matrix: Setup PI matrix of the PSE controller.
+ * The PSE PIs devicetree nodes have already been parsed by
+ * of_load_pse_pis() and the pcdev->pi[x]->pairset[y].np
+ * populated. This callback should establish the
+ * relationship between the PSE controller hardware ports
+ * and the PSE Power Interfaces, either through software
+ * mapping or hardware configuration.
* @pi_get_admin_state: Get the operational state of the PSE PI. This ops
* is mandatory.
* @pi_get_pw_status: Get the power detection status of the PSE PI. This
@@ -152,6 +183,11 @@ struct ethtool_pse_control_status {
* range. The driver is in charge of the memory
* allocation and should return the number of
* ranges.
+ * @pi_get_prio: Get the PSE PI priority.
+ * @pi_set_prio: Configure the PSE PI priority.
+ * @pi_get_pw_req: Get the power requested by a PD before enabling the PSE PI.
+ * This is only relevant when an interrupt is registered using
+ *		   the devm_pse_irq_helper() helper.
*/
struct pse_controller_ops {
int (*setup_pi_matrix)(struct pse_controller_dev *pcdev);
@@ -172,6 +208,10 @@ struct pse_controller_ops {
int id, int max_mW);
int (*pi_get_pw_limit_ranges)(struct pse_controller_dev *pcdev, int id,
struct pse_pw_limit_ranges *pw_limit_ranges);
+ int (*pi_get_prio)(struct pse_controller_dev *pcdev, int id);
+ int (*pi_set_prio)(struct pse_controller_dev *pcdev, int id,
+ unsigned int prio);
+ int (*pi_get_pw_req)(struct pse_controller_dev *pcdev, int id);
};
struct module;
@@ -206,12 +246,35 @@ struct pse_pi_pairset {
* @np: device node pointer of the PSE PI node
* @rdev: regulator represented by the PSE PI
* @admin_state_enabled: PI enabled state
+ * @pw_d: Power domain of the PSE PI
+ * @prio: Priority of the PSE PI. Used in static budget evaluation strategy
+ * @isr_pd_detected: PSE PI detection status managed by the interrupt
+ *		     handler. This variable is relevant when power enablement
+ *		     is managed in software, as with the static budget
+ *		     evaluation strategy.
+ * @pw_allocated_mW: Power allocated to a PSE PI to manage power budget in
+ * static budget evaluation strategy.
*/
struct pse_pi {
struct pse_pi_pairset pairset[2];
struct device_node *np;
struct regulator_dev *rdev;
bool admin_state_enabled;
+ struct pse_power_domain *pw_d;
+ int prio;
+ bool isr_pd_detected;
+ int pw_allocated_mW;
+};
+
+/**
+ * struct pse_ntf - PSE notification element
+ *
+ * @id: ID of the PSE control
+ * @notifs: PSE notifications to be reported
+ */
+struct pse_ntf {
+ int id;
+ unsigned long notifs;
};
/**
@@ -228,6 +291,13 @@ struct pse_pi {
* @types: types of the PSE controller
* @pi: table of PSE PIs described in this controller device
* @no_of_pse_pi: flag set if the pse_pis devicetree node is not used
+ * @irq: PSE interrupt
+ * @pis_prio_max: Maximum value allowed for the PSE PIs priority
+ * @supp_budget_eval_strategies: budget evaluation strategies supported
+ * by the PSE
+ * @ntf_work: workqueue for PSE notification management
+ * @ntf_fifo: PSE notifications FIFO
+ * @ntf_fifo_lock: protect @ntf_fifo writer
*/
struct pse_controller_dev {
const struct pse_controller_ops *ops;
@@ -241,6 +311,30 @@ struct pse_controller_dev {
enum ethtool_pse_types types;
struct pse_pi *pi;
bool no_of_pse_pi;
+ int irq;
+ unsigned int pis_prio_max;
+ u32 supp_budget_eval_strategies;
+ struct work_struct ntf_work;
+ DECLARE_KFIFO_PTR(ntf_fifo, struct pse_ntf);
+ spinlock_t ntf_fifo_lock; /* Protect @ntf_fifo writer */
+};
+
+/**
+ * enum pse_budget_eval_strategies - PSE budget evaluation strategies.
+ * @PSE_BUDGET_EVAL_STRAT_DISABLED: Budget evaluation strategy disabled.
+ * @PSE_BUDGET_EVAL_STRAT_STATIC: PSE static budget evaluation strategy.
+ * Budget evaluation strategy based on the power requested during PD
+ * classification. This strategy is managed by the PSE core.
+ * @PSE_BUDGET_EVAL_STRAT_DYNAMIC: PSE dynamic budget evaluation
+ * strategy. Budget evaluation strategy based on the current consumption
+ * per port compared to the total power budget. This mode is managed by
+ * the PSE controller.
+ */
+
+enum pse_budget_eval_strategies {
+ PSE_BUDGET_EVAL_STRAT_DISABLED = 1 << 0,
+ PSE_BUDGET_EVAL_STRAT_STATIC = 1 << 1,
+ PSE_BUDGET_EVAL_STRAT_DYNAMIC = 1 << 2,
};
#if IS_ENABLED(CONFIG_PSE_CONTROLLER)
@@ -249,8 +343,11 @@ void pse_controller_unregister(struct pse_controller_dev *pcdev);
struct device;
int devm_pse_controller_register(struct device *dev,
struct pse_controller_dev *pcdev);
+int devm_pse_irq_helper(struct pse_controller_dev *pcdev, int irq,
+ int irq_flags, const struct pse_irq_desc *d);
-struct pse_control *of_pse_control_get(struct device_node *node);
+struct pse_control *of_pse_control_get(struct device_node *node,
+ struct phy_device *phydev);
void pse_control_put(struct pse_control *psec);
int pse_ethtool_get_status(struct pse_control *psec,
@@ -262,13 +359,17 @@ int pse_ethtool_set_config(struct pse_control *psec,
int pse_ethtool_set_pw_limit(struct pse_control *psec,
struct netlink_ext_ack *extack,
const unsigned int pw_limit);
+int pse_ethtool_set_prio(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ unsigned int prio);
bool pse_has_podl(struct pse_control *psec);
bool pse_has_c33(struct pse_control *psec);
#else
-static inline struct pse_control *of_pse_control_get(struct device_node *node)
+static inline struct pse_control *of_pse_control_get(struct device_node *node,
+ struct phy_device *phydev)
{
return ERR_PTR(-ENOENT);
}
@@ -298,6 +399,13 @@ static inline int pse_ethtool_set_pw_limit(struct pse_control *psec,
return -EOPNOTSUPP;
}
+static inline int pse_ethtool_set_prio(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ unsigned int prio)
+{
+ return -EOPNOTSUPP;
+}
+
static inline bool pse_has_podl(struct pse_control *psec)
{
return false;
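
For illustration, a minimal driver-side sketch of the new priority ops. Only the ops
signatures come from pse.h; the register layout (MY_PSE_PRIO_REG) and the my_pse
structure are hypothetical.

#include <linux/container_of.h>
#include <linux/io.h>
#include <linux/pse-pd/pse.h>

/* Hypothetical per-PI priority register, for illustration only. */
#define MY_PSE_PRIO_REG(id)	(0x100 + (id) * 4)

struct my_pse {
	struct pse_controller_dev pcdev;
	void __iomem *base;
};

static int my_pse_pi_get_prio(struct pse_controller_dev *pcdev, int id)
{
	struct my_pse *priv = container_of(pcdev, struct my_pse, pcdev);

	/* Read back the priority previously programmed for this PI */
	return readl(priv->base + MY_PSE_PRIO_REG(id));
}

static int my_pse_pi_set_prio(struct pse_controller_dev *pcdev, int id,
			      unsigned int prio)
{
	struct my_pse *priv = container_of(pcdev, struct my_pse, pcdev);

	/* prio is expected to stay within 0..pcdev->pis_prio_max */
	writel(prio, priv->base + MY_PSE_PRIO_REG(id));
	return 0;
}

static const struct pse_controller_ops my_pse_ops = {
	.pi_get_prio = my_pse_pi_get_prio,
	.pi_set_prio = my_pse_pi_set_prio,
};
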
diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
index f1fd3a8044e0..dd10c22299ab 100644
--- a/include/linux/psi_types.h
+++ b/include/linux/psi_types.h
@@ -84,11 +84,9 @@ enum psi_aggregators {
struct psi_group_cpu {
/* 1st cacheline updated by the scheduler */
- /* Aggregator needs to know of concurrent changes */
- seqcount_t seq ____cacheline_aligned_in_smp;
-
/* States of the tasks belonging to this group */
- unsigned int tasks[NR_PSI_TASK_COUNTS];
+ unsigned int tasks[NR_PSI_TASK_COUNTS]
+ ____cacheline_aligned_in_smp;
/* Aggregate pressure state derived from the tasks */
u32 state_mask;
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index eced7e9bf69a..3d089bd4d5e9 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -477,40 +477,14 @@ static inline ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp,
static inline void ptp_read_system_prets(struct ptp_system_timestamp *sts)
{
- if (sts) {
- switch (sts->clockid) {
- case CLOCK_REALTIME:
- ktime_get_real_ts64(&sts->pre_ts);
- break;
- case CLOCK_MONOTONIC:
- ktime_get_ts64(&sts->pre_ts);
- break;
- case CLOCK_MONOTONIC_RAW:
- ktime_get_raw_ts64(&sts->pre_ts);
- break;
- default:
- break;
- }
- }
+ if (sts)
+ ktime_get_clock_ts64(sts->clockid, &sts->pre_ts);
}
static inline void ptp_read_system_postts(struct ptp_system_timestamp *sts)
{
- if (sts) {
- switch (sts->clockid) {
- case CLOCK_REALTIME:
- ktime_get_real_ts64(&sts->post_ts);
- break;
- case CLOCK_MONOTONIC:
- ktime_get_ts64(&sts->post_ts);
- break;
- case CLOCK_MONOTONIC_RAW:
- ktime_get_raw_ts64(&sts->post_ts);
- break;
- default:
- break;
- }
- }
+ if (sts)
+ ktime_get_clock_ts64(sts->clockid, &sts->post_ts);
}
#endif
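
The two helpers above are typically called from a driver's gettimex64 callback to
bracket the hardware clock read. A minimal sketch; my_read_hw_clock() is a
hypothetical PHC register read.

#include <linux/ptp_clock_kernel.h>

static int my_gettimex64(struct ptp_clock_info *ptp, struct timespec64 *ts,
			 struct ptp_system_timestamp *sts)
{
	ptp_read_system_prets(sts);	/* system time snapshot before the read */
	my_read_hw_clock(ts);		/* hypothetical hardware clock read */
	ptp_read_system_postts(sts);	/* system time snapshot after the read */
	return 0;
}
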
diff --git a/include/linux/ref_tracker.h b/include/linux/ref_tracker.h
index 8eac4f3d5254..d10563afd91c 100644
--- a/include/linux/ref_tracker.h
+++ b/include/linux/ref_tracker.h
@@ -6,6 +6,8 @@
#include <linux/spinlock.h>
#include <linux/stackdepot.h>
+#define __ostream_printf __printf(2, 3)
+
struct ref_tracker;
struct ref_tracker_dir {
@@ -17,15 +19,45 @@ struct ref_tracker_dir {
bool dead;
struct list_head list; /* List of active trackers */
struct list_head quarantine; /* List of dead trackers */
- char name[32];
+ const char *class; /* object classname */
#endif
};
#ifdef CONFIG_REF_TRACKER
+#ifdef CONFIG_DEBUG_FS
+
+void ref_tracker_dir_debugfs(struct ref_tracker_dir *dir);
+void ref_tracker_dir_symlink(struct ref_tracker_dir *dir, const char *fmt, ...);
+
+#else /* CONFIG_DEBUG_FS */
+
+static inline void ref_tracker_dir_debugfs(struct ref_tracker_dir *dir)
+{
+}
+
+static inline __ostream_printf
+void ref_tracker_dir_symlink(struct ref_tracker_dir *dir, const char *fmt, ...)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
+/**
+ * ref_tracker_dir_init - initialize a ref_tracker dir
+ * @dir: ref_tracker_dir to be initialized
+ * @quarantine_count: max number of entries to be tracked
+ * @class: pointer to static string that describes object type
+ *
+ * Initialize a ref_tracker_dir. If debugfs is configured, then a file
+ * will also be created for it under the top-level ref_tracker debugfs
+ * directory.
+ *
+ * Note that @class must point to a static string.
+ */
static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir,
unsigned int quarantine_count,
- const char *name)
+ const char *class)
{
INIT_LIST_HEAD(&dir->list);
INIT_LIST_HEAD(&dir->quarantine);
@@ -34,7 +66,8 @@ static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir,
dir->dead = false;
refcount_set(&dir->untracked, 1);
refcount_set(&dir->no_tracker, 1);
- strscpy(dir->name, name, sizeof(dir->name));
+ dir->class = class;
+ ref_tracker_dir_debugfs(dir);
stack_depot_init();
}
@@ -58,7 +91,16 @@ int ref_tracker_free(struct ref_tracker_dir *dir,
static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir,
unsigned int quarantine_count,
- const char *name)
+ const char *class)
+{
+}
+
+static inline void ref_tracker_dir_debugfs(struct ref_tracker_dir *dir)
+{
+}
+
+static inline __ostream_printf
+void ref_tracker_dir_symlink(struct ref_tracker_dir *dir, const char *fmt, ...)
{
}
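
A usage sketch of the reworked API, assuming a hypothetical object that embeds a
tracker directory. The class string must be static; the debugfs file is created by
ref_tracker_dir_init() when debugfs is enabled.

#include <linux/ref_tracker.h>
#include <linux/slab.h>

struct my_obj {
	struct ref_tracker_dir dir;
	struct ref_tracker *self_ref;
	int id;
};

static struct my_obj *my_obj_create(int id)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	obj->id = id;
	ref_tracker_dir_init(&obj->dir, 16, "my_obj");	/* "my_obj" is static */
	ref_tracker_alloc(&obj->dir, &obj->self_ref, GFP_KERNEL);
	/* optionally name the debugfs symlink after a unique identifier */
	ref_tracker_dir_symlink(&obj->dir, "my_obj-%d", obj->id);
	return obj;
}

static void my_obj_destroy(struct my_obj *obj)
{
	ref_tracker_free(&obj->dir, &obj->self_ref);
	ref_tracker_dir_exit(&obj->dir);
	kfree(obj);
}
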
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index cd7f0ae26615..bc90c3c7b5fd 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -152,9 +152,7 @@ ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
unsigned long *lost_events);
struct ring_buffer_iter *
-ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags);
-void ring_buffer_read_prepare_sync(void);
-void ring_buffer_read_start(struct ring_buffer_iter *iter);
+ring_buffer_read_start(struct trace_buffer *buffer, int cpu, gfp_t flags);
void ring_buffer_read_finish(struct ring_buffer_iter *iter);
struct ring_buffer_event *
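
With the consolidated API, opening a per-CPU iterator becomes a single call. A hedged
sketch of the new calling convention; the event-consumption step is elided and a NULL
return on allocation failure is assumed.

	struct ring_buffer_iter *iter;

	iter = ring_buffer_read_start(buffer, cpu, GFP_KERNEL);
	if (iter) {
		/* walk events with the ring_buffer_iter_*() helpers here */
		ring_buffer_read_finish(iter);
	}
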
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index c4f4903b1088..20803fcb49a7 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -893,7 +893,7 @@ static inline int folio_try_share_anon_rmap_pmd(struct folio *folio,
* Called from mm/vmscan.c to handle paging out
*/
int folio_referenced(struct folio *, int is_locked,
- struct mem_cgroup *memcg, unsigned long *vm_flags);
+ struct mem_cgroup *memcg, vm_flags_t *vm_flags);
void try_to_migrate(struct folio *folio, enum ttu_flags flags);
void try_to_unmap(struct folio *, enum ttu_flags flags);
@@ -1025,7 +1025,7 @@ struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio,
static inline int folio_referenced(struct folio *folio, int is_locked,
struct mem_cgroup *memcg,
- unsigned long *vm_flags)
+ vm_flags_t *vm_flags)
{
*vm_flags = 0;
return 0;
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 7d049883a08a..fa9f1021541e 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -18,7 +18,7 @@
#include <linux/rbtree_types.h>
#include <linux/spinlock_types_raw.h>
-extern int max_lock_depth; /* for sysctl */
+extern int max_lock_depth;
struct rt_mutex_base {
raw_spinlock_t wait_lock;
diff --git a/include/linux/rv.h b/include/linux/rv.h
index 3452b5e4b29e..14410a42faef 100644
--- a/include/linux/rv.h
+++ b/include/linux/rv.h
@@ -7,9 +7,17 @@
#ifndef _LINUX_RV_H
#define _LINUX_RV_H
-#define MAX_DA_NAME_LEN 32
+#include <linux/types.h>
+#include <linux/list.h>
+
+#define MAX_DA_NAME_LEN 32
+#define MAX_DA_RETRY_RACING_EVENTS 3
#ifdef CONFIG_RV
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/array_size.h>
+
/*
* Deterministic automaton per-object variables.
*/
@@ -18,27 +26,72 @@ struct da_monitor {
unsigned int curr_state;
};
+#ifdef CONFIG_RV_LTL_MONITOR
+
/*
- * Per-task RV monitors count. Nowadays fixed in RV_PER_TASK_MONITORS.
- * If we find justification for more monitors, we can think about
- * adding more or developing a dynamic method. So far, none of
- * these are justified.
+ * In the future, if the number of atomic propositions or the size of the Buchi
+ * automaton grows, we can switch to dynamic allocation. For now, the code
+ * is simpler this way.
*/
-#define RV_PER_TASK_MONITORS 1
-#define RV_PER_TASK_MONITOR_INIT (RV_PER_TASK_MONITORS)
+#define RV_MAX_LTL_ATOM 32
+#define RV_MAX_BA_STATES 32
-/*
- * Futher monitor types are expected, so make this a union.
+/**
+ * struct ltl_monitor - A linear temporal logic runtime verification monitor
+ * @states: States in the Buchi automaton. As the Buchi automaton is a
+ * non-deterministic state machine, the monitor can be in multiple
+ * states simultaneously. This is a bitmask of all possible states.
+ * If this is zero, that means either:
+ * - The monitor has not started yet (e.g. because not all
+ * atomic propositions are known).
+ * - There is no possible state to be in. In other words, a
+ * violation of the LTL property is detected.
+ * @atoms: The values of atomic propositions.
+ * @unknown_atoms: Atomic propositions which are still unknown.
*/
+struct ltl_monitor {
+ DECLARE_BITMAP(states, RV_MAX_BA_STATES);
+ DECLARE_BITMAP(atoms, RV_MAX_LTL_ATOM);
+ DECLARE_BITMAP(unknown_atoms, RV_MAX_LTL_ATOM);
+};
+
+static inline bool rv_ltl_valid_state(struct ltl_monitor *mon)
+{
+ for (int i = 0; i < ARRAY_SIZE(mon->states); ++i) {
+ if (mon->states[i])
+ return true;
+ }
+ return false;
+}
+
+static inline bool rv_ltl_all_atoms_known(struct ltl_monitor *mon)
+{
+ for (int i = 0; i < ARRAY_SIZE(mon->unknown_atoms); ++i) {
+ if (mon->unknown_atoms[i])
+ return false;
+ }
+ return true;
+}
+
+#else
+
+struct ltl_monitor {};
+
+#endif /* CONFIG_RV_LTL_MONITOR */
+
+#define RV_PER_TASK_MONITOR_INIT (CONFIG_RV_PER_TASK_MONITORS)
+
union rv_task_monitor {
- struct da_monitor da_mon;
+ struct da_monitor da_mon;
+ struct ltl_monitor ltl_mon;
};
#ifdef CONFIG_RV_REACTORS
struct rv_reactor {
const char *name;
const char *description;
- void (*react)(char *msg);
+ __printf(1, 2) void (*react)(const char *msg, ...);
+ struct list_head list;
};
#endif
@@ -50,8 +103,12 @@ struct rv_monitor {
void (*disable)(void);
void (*reset)(void);
#ifdef CONFIG_RV_REACTORS
- void (*react)(char *msg);
+ struct rv_reactor *reactor;
+ __printf(1, 2) void (*react)(const char *msg, ...);
#endif
+ struct list_head list;
+ struct rv_monitor *parent;
+ struct dentry *root_d;
};
bool rv_monitoring_on(void);
@@ -64,6 +121,11 @@ void rv_put_task_monitor_slot(int slot);
bool rv_reacting_on(void);
int rv_unregister_reactor(struct rv_reactor *reactor);
int rv_register_reactor(struct rv_reactor *reactor);
+#else
+static inline bool rv_reacting_on(void)
+{
+ return false;
+}
#endif /* CONFIG_RV_REACTORS */
#endif /* CONFIG_RV */
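
As a small illustration of the two helpers above, a monitor-side check (hypothetical,
not part of this header) might flag a violation once every atomic proposition is known
but no Buchi state remains reachable.

#include <linux/printk.h>
#include <linux/rv.h>

static void my_ltl_validate(struct ltl_monitor *mon)
{
	/* A violation is an empty state set once all atoms are known */
	if (rv_ltl_all_atoms_known(mon) && !rv_ltl_valid_state(mon))
		pr_warn("rv: LTL property violated\n");
}
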
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 63a9b6e23b07..a9693f179c58 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -34,6 +34,7 @@
#include <linux/sched/prio.h>
#include <linux/sched/types.h>
#include <linux/signal_types.h>
+#include <linux/spinlock.h>
#include <linux/syscall_user_dispatch_types.h>
#include <linux/mm_types_task.h>
#include <linux/netdevice_xmit.h>
@@ -339,9 +340,11 @@ extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
-/* wrapper function to trace from this header file */
+/* wrapper functions to trace from this header file */
DECLARE_TRACEPOINT(sched_set_state_tp);
extern void __trace_set_current_state(int state_value);
+DECLARE_TRACEPOINT(sched_set_need_resched_tp);
+extern void __trace_set_need_resched(struct task_struct *curr, int tif);
/**
* struct prev_cputime - snapshot of system and user cputime
@@ -395,15 +398,10 @@ enum uclamp_id {
UCLAMP_CNT
};
-#ifdef CONFIG_SMP
extern struct root_domain def_root_domain;
extern struct mutex sched_domains_mutex;
extern void sched_domains_mutex_lock(void);
extern void sched_domains_mutex_unlock(void);
-#else
-static inline void sched_domains_mutex_lock(void) { }
-static inline void sched_domains_mutex_unlock(void) { }
-#endif
struct sched_param {
int sched_priority;
@@ -584,7 +582,15 @@ struct sched_entity {
u64 sum_exec_runtime;
u64 prev_sum_exec_runtime;
u64 vruntime;
- s64 vlag;
+ union {
+ /*
+ * When !@on_rq this field is vlag.
+ * When cfs_rq->curr == se (which implies @on_rq)
+ * this field is vprot. See protect_slice().
+ */
+ s64 vlag;
+ u64 vprot;
+ };
u64 slice;
u64 nr_migrations;
@@ -600,7 +606,6 @@ struct sched_entity {
unsigned long runnable_weight;
#endif
-#ifdef CONFIG_SMP
/*
* Per entity load average tracking.
*
@@ -608,7 +613,6 @@ struct sched_entity {
* collide with read-mostly values above.
*/
struct sched_avg avg;
-#endif
};
struct sched_rt_entity {
@@ -701,6 +705,7 @@ struct sched_dl_entity {
unsigned int dl_defer : 1;
unsigned int dl_defer_armed : 1;
unsigned int dl_defer_running : 1;
+ unsigned int dl_server_idle : 1;
/*
* Bandwidth enforcement timer. Each -deadline task has its
@@ -838,7 +843,6 @@ struct task_struct {
struct alloc_tag *alloc_tag;
#endif
-#ifdef CONFIG_SMP
int on_cpu;
struct __call_single_node wake_entry;
unsigned int wakee_flips;
@@ -854,7 +858,6 @@ struct task_struct {
*/
int recent_used_cpu;
int wake_cpu;
-#endif
int on_rq;
int prio;
@@ -913,9 +916,7 @@ struct task_struct {
cpumask_t *user_cpus_ptr;
cpumask_t cpus_mask;
void *migration_pending;
-#ifdef CONFIG_SMP
unsigned short migration_disabled;
-#endif
unsigned short migration_flags;
#ifdef CONFIG_PREEMPT_RCU
@@ -947,10 +948,8 @@ struct task_struct {
struct sched_info sched_info;
struct list_head tasks;
-#ifdef CONFIG_SMP
struct plist_node pushable_tasks;
struct rb_node pushable_dl_tasks;
-#endif
struct mm_struct *mm;
struct mm_struct *active_mm;
@@ -1234,10 +1233,7 @@ struct task_struct {
struct rt_mutex_waiter *pi_blocked_on;
#endif
-#ifdef CONFIG_DEBUG_MUTEXES
- /* Mutex deadlock detection: */
- struct mutex_waiter *blocked_on;
-#endif
+ struct mutex *blocked_on; /* lock we're blocked on */
#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
/*
@@ -1640,12 +1636,10 @@ struct task_struct {
#ifdef CONFIG_RV
/*
- * Per-task RV monitor. Nowadays fixed in RV_PER_TASK_MONITORS.
- * If we find justification for more monitors, we can think
- * about adding more or developing a dynamic method. So far,
- * none of these are justified.
+ * Per-task RV monitor, fixed in CONFIG_RV_PER_TASK_MONITORS.
+ * If memory becomes a concern, we can think about a dynamic method.
*/
- union rv_task_monitor rv[RV_PER_TASK_MONITORS];
+ union rv_task_monitor rv[CONFIG_RV_PER_TASK_MONITORS];
#endif
#ifdef CONFIG_USER_EVENTS
@@ -1662,6 +1656,19 @@ struct task_struct {
randomized_struct_fields_end
} __attribute__ ((aligned (64)));
+#ifdef CONFIG_SCHED_PROXY_EXEC
+DECLARE_STATIC_KEY_TRUE(__sched_proxy_exec);
+static inline bool sched_proxy_exec(void)
+{
+ return static_branch_likely(&__sched_proxy_exec);
+}
+#else
+static inline bool sched_proxy_exec(void)
+{
+ return false;
+}
+#endif
+
#define TASK_REPORT_IDLE (TASK_REPORT + 1)
#define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)
@@ -1776,12 +1783,8 @@ extern struct pid *cad_pid;
static __always_inline bool is_percpu_thread(void)
{
-#ifdef CONFIG_SMP
return (current->flags & PF_NO_SETAFFINITY) &&
(current->nr_cpus_allowed == 1);
-#else
- return true;
-#endif
}
/* Per-process atomic flags. */
@@ -1846,7 +1849,6 @@ extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpu
extern int task_can_attach(struct task_struct *p);
extern int dl_bw_alloc(int cpu, u64 dl_bw);
extern void dl_bw_free(int cpu, u64 dl_bw);
-#ifdef CONFIG_SMP
/* do_set_cpus_allowed() - consider using set_cpus_allowed_ptr() instead */
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
@@ -1864,33 +1866,6 @@ extern void release_user_cpus_ptr(struct task_struct *p);
extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
extern void force_compatible_cpus_allowed_ptr(struct task_struct *p);
extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p);
-#else
-static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
-{
-}
-static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
-{
- /* Opencoded cpumask_test_cpu(0, new_mask) to avoid dependency on cpumask.h */
- if ((*cpumask_bits(new_mask) & 1) == 0)
- return -EINVAL;
- return 0;
-}
-static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node)
-{
- if (src->user_cpus_ptr)
- return -EINVAL;
- return 0;
-}
-static inline void release_user_cpus_ptr(struct task_struct *p)
-{
- WARN_ON(p->user_cpus_ptr);
-}
-
-static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
-{
- return 0;
-}
-#endif
extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
@@ -1979,11 +1954,7 @@ extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
-#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
-#else
-static inline void kick_process(struct task_struct *tsk) { }
-#endif
extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
#define set_task_comm(tsk, from) ({ \
@@ -2010,7 +1981,6 @@ extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec
buf; \
})
-#ifdef CONFIG_SMP
static __always_inline void scheduler_ipi(void)
{
/*
@@ -2020,9 +1990,6 @@ static __always_inline void scheduler_ipi(void)
*/
preempt_fold_need_resched();
}
-#else
-static inline void scheduler_ipi(void) { }
-#endif
extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
@@ -2063,6 +2030,9 @@ static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
static inline void set_tsk_need_resched(struct task_struct *tsk)
{
+ if (tracepoint_enabled(sched_set_need_resched_tp) &&
+ !test_tsk_thread_flag(tsk, TIF_NEED_RESCHED))
+ __trace_set_need_resched(tsk, TIF_NEED_RESCHED);
set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}
@@ -2165,6 +2135,67 @@ extern int __cond_resched_rwlock_write(rwlock_t *lock);
__cond_resched_rwlock_write(lock); \
})
+#ifndef CONFIG_PREEMPT_RT
+static inline struct mutex *__get_task_blocked_on(struct task_struct *p)
+{
+ struct mutex *m = p->blocked_on;
+
+ if (m)
+ lockdep_assert_held_once(&m->wait_lock);
+ return m;
+}
+
+static inline void __set_task_blocked_on(struct task_struct *p, struct mutex *m)
+{
+ WARN_ON_ONCE(!m);
+ /* The task should only be setting itself as blocked */
+ WARN_ON_ONCE(p != current);
+ /* Currently we serialize blocked_on under the mutex::wait_lock */
+ lockdep_assert_held_once(&m->wait_lock);
+ /*
+ * Check ensure we don't overwrite existing mutex value
+ * with a different mutex. Note, setting it to the same
+ * lock repeatedly is ok.
+ */
+ WARN_ON_ONCE(p->blocked_on && p->blocked_on != m);
+ p->blocked_on = m;
+}
+
+static inline void set_task_blocked_on(struct task_struct *p, struct mutex *m)
+{
+ guard(raw_spinlock_irqsave)(&m->wait_lock);
+ __set_task_blocked_on(p, m);
+}
+
+static inline void __clear_task_blocked_on(struct task_struct *p, struct mutex *m)
+{
+ WARN_ON_ONCE(!m);
+ /* Currently we serialize blocked_on under the mutex::wait_lock */
+ lockdep_assert_held_once(&m->wait_lock);
+ /*
+ * There may be cases where we re-clear already cleared
+ * blocked_on relationships, but make sure we are not
+ * clearing the relationship with a different lock.
+ */
+ WARN_ON_ONCE(m && p->blocked_on && p->blocked_on != m);
+ p->blocked_on = NULL;
+}
+
+static inline void clear_task_blocked_on(struct task_struct *p, struct mutex *m)
+{
+ guard(raw_spinlock_irqsave)(&m->wait_lock);
+ __clear_task_blocked_on(p, m);
+}
+#else
+static inline void __clear_task_blocked_on(struct task_struct *p, struct rt_mutex *m)
+{
+}
+
+static inline void clear_task_blocked_on(struct task_struct *p, struct rt_mutex *m)
+{
+}
+#endif /* !CONFIG_PREEMPT_RT */
+
static __always_inline bool need_resched(void)
{
return unlikely(tif_need_resched());
@@ -2204,8 +2235,6 @@ extern bool sched_task_on_rq(struct task_struct *p);
extern unsigned long get_wchan(struct task_struct *p);
extern struct task_struct *cpu_curr_snapshot(int cpu);
-#include <linux/spinlock.h>
-
/*
* In order to reduce various lock holder preemption latencies provide an
* interface to see if a vCPU is currently running or not.
@@ -2228,7 +2257,6 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
#define TASK_SIZE_OF(tsk) TASK_SIZE
#endif
-#ifdef CONFIG_SMP
static inline bool owner_on_cpu(struct task_struct *owner)
{
/*
@@ -2240,7 +2268,6 @@ static inline bool owner_on_cpu(struct task_struct *owner)
/* Returns effective CPU energy utilization, as seen by the scheduler */
unsigned long sched_cpu_util(int cpu);
-#endif /* CONFIG_SMP */
#ifdef CONFIG_SCHED_CORE
extern void sched_core_free(struct task_struct *tsk);
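
For orientation, the intended pairing of the blocked_on helpers in a sleeping-lock
slowpath would roughly be as below. This is a sketch, not the actual mutex slowpath,
and assumes m->wait_lock serializes the field as the comments above state.

	/* before going to sleep waiting for the mutex */
	set_task_blocked_on(current, m);

	/* ... schedule() until woken by the lock owner ... */

	/* once the mutex has been acquired (or the wait is aborted) */
	clear_task_blocked_on(current, m);
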
diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
index f9aabbc9d22e..c40115d4e34d 100644
--- a/include/linux/sched/deadline.h
+++ b/include/linux/sched/deadline.h
@@ -29,15 +29,11 @@ static inline bool dl_time_before(u64 a, u64 b)
return (s64)(a - b) < 0;
}
-#ifdef CONFIG_SMP
-
struct root_domain;
extern void dl_add_task_root_domain(struct task_struct *p);
extern void dl_clear_root_domain(struct root_domain *rd);
extern void dl_clear_root_domain_cpu(int cpu);
-#endif /* CONFIG_SMP */
-
extern u64 dl_cookie;
extern bool dl_bw_visited(int cpu, u64 cookie);
diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h
index f7545430a548..7047101dbf58 100644
--- a/include/linux/sched/ext.h
+++ b/include/linux/sched/ext.h
@@ -164,7 +164,7 @@ struct sched_ext_entity {
/*
* Runtime budget in nsecs. This is usually set through
- * scx_bpf_dispatch() but can also be modified directly by the BPF
+ * scx_bpf_dsq_insert() but can also be modified directly by the BPF
* scheduler. Automatically decreased by SCX as the task executes. On
* depletion, a scheduling event is triggered.
*
@@ -176,10 +176,10 @@ struct sched_ext_entity {
/*
* Used to order tasks when dispatching to the vtime-ordered priority
- * queue of a dsq. This is usually set through scx_bpf_dispatch_vtime()
- * but can also be modified directly by the BPF scheduler. Modifying it
- * while a task is queued on a dsq may mangle the ordering and is not
- * recommended.
+ * queue of a dsq. This is usually set through
+ * scx_bpf_dsq_insert_vtime() but can also be modified directly by the
+ * BPF scheduler. Modifying it while a task is queued on a dsq may
+ * mangle the ordering and is not recommended.
*/
u64 dsq_vtime;
@@ -206,12 +206,25 @@ struct sched_ext_entity {
void sched_ext_free(struct task_struct *p);
void print_scx_info(const char *log_lvl, struct task_struct *p);
void scx_softlockup(u32 dur_s);
+bool scx_rcu_cpu_stall(void);
#else /* !CONFIG_SCHED_CLASS_EXT */
static inline void sched_ext_free(struct task_struct *p) {}
static inline void print_scx_info(const char *log_lvl, struct task_struct *p) {}
static inline void scx_softlockup(u32 dur_s) {}
+static inline bool scx_rcu_cpu_stall(void) { return false; }
#endif /* CONFIG_SCHED_CLASS_EXT */
+
+struct scx_task_group {
+#ifdef CONFIG_EXT_GROUP_SCHED
+ u32 flags; /* SCX_TG_* */
+ u32 weight;
+ u64 bw_period_us;
+ u64 bw_quota_us;
+ u64 bw_burst_us;
+#endif
+};
+
#endif /* _LINUX_SCHED_EXT_H */
diff --git a/include/linux/sched/idle.h b/include/linux/sched/idle.h
index 439f6029d3b9..8465ff1f20d1 100644
--- a/include/linux/sched/idle.h
+++ b/include/linux/sched/idle.h
@@ -11,11 +11,7 @@ enum cpu_idle_type {
CPU_MAX_IDLE_TYPES
};
-#ifdef CONFIG_SMP
extern void wake_up_if_idle(int cpu);
-#else
-static inline void wake_up_if_idle(int cpu) { }
-#endif
/*
* Idle thread specific functions to determine the need_resched
diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h
index 6d67e9a5af6b..0db7f67935fe 100644
--- a/include/linux/sched/nohz.h
+++ b/include/linux/sched/nohz.h
@@ -6,7 +6,7 @@
* This is the interface between the scheduler and nohz/dynticks:
*/
-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+#ifdef CONFIG_NO_HZ_COMMON
extern void nohz_balance_enter_idle(int cpu);
extern int get_nohz_timer_target(void);
#else
@@ -23,7 +23,7 @@ static inline void calc_load_nohz_remote(struct rq *rq) { }
static inline void calc_load_nohz_stop(void) { }
#endif /* CONFIG_NO_HZ_COMMON */
-#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
+#ifdef CONFIG_NO_HZ_COMMON
extern void wake_up_nohz_cpu(int cpu);
#else
static inline void wake_up_nohz_cpu(int cpu) { }
diff --git a/include/linux/sched/sd_flags.h b/include/linux/sched/sd_flags.h
index b04a5d04dee9..42839cfa2778 100644
--- a/include/linux/sched/sd_flags.h
+++ b/include/linux/sched/sd_flags.h
@@ -154,14 +154,6 @@ SD_FLAG(SD_ASYM_PACKING, SDF_NEEDS_GROUPS)
SD_FLAG(SD_PREFER_SIBLING, SDF_NEEDS_GROUPS)
/*
- * sched_groups of this level overlap
- *
- * SHARED_PARENT: Set for all NUMA levels above NODE.
- * NEEDS_GROUPS: Overlaps can only exist with more than one group.
- */
-SD_FLAG(SD_OVERLAP, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
-
-/*
* Cross-node balancing
*
* SHARED_PARENT: Set for all NUMA levels above NODE.
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index ca1db4b92c32..ea41795a352b 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -109,11 +109,7 @@ int kernel_wait(pid_t pid, int *stat);
extern void free_task(struct task_struct *tsk);
/* sched_exec is called by processes performing an exec */
-#ifdef CONFIG_SMP
extern void sched_exec(void);
-#else
-#define sched_exec() {}
-#endif
static inline struct task_struct *get_task_struct(struct task_struct *t)
{
@@ -135,24 +131,17 @@ static inline void put_task_struct(struct task_struct *t)
return;
/*
- * In !RT, it is always safe to call __put_task_struct().
- * Under RT, we can only call it in preemptible context.
- */
- if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
- static DEFINE_WAIT_OVERRIDE_MAP(put_task_map, LD_WAIT_SLEEP);
-
- lock_map_acquire_try(&put_task_map);
- __put_task_struct(t);
- lock_map_release(&put_task_map);
- return;
- }
-
- /*
- * under PREEMPT_RT, we can't call put_task_struct
+ * Under PREEMPT_RT, we can't call __put_task_struct
* in atomic context because it will indirectly
- * acquire sleeping locks.
+ * acquire sleeping locks. The same is true if the
+ * current process has a mutex enqueued (blocked on
+ * a PI chain).
+ *
+ * In !RT, it is always safe to call __put_task_struct().
+ * However, in order to simplify the code, we resort to
+ * the deferred call here as well.
*
- * call_rcu() will schedule delayed_put_task_struct_rcu()
+ * call_rcu() will schedule __put_task_struct_rcu_cb()
* to be called in process context.
*
* __put_task_struct() is called when
@@ -165,7 +154,7 @@ static inline void put_task_struct(struct task_struct *t)
*
* delayed_free_task() also uses ->rcu, but it is only called
* when it fails to fork a process. Therefore, there is no
- * way it can conflict with put_task_struct().
+ * way it can conflict with __put_task_struct().
*/
call_rcu(&t->rcu, __put_task_struct_rcu_cb);
}
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 198bb5cc1774..5263746b63e8 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -9,7 +9,6 @@
/*
* sched-domains (multiprocessor balancing) declarations:
*/
-#ifdef CONFIG_SMP
/* Generate SD flag indexes */
#define SD_FLAG(name, mflags) __##name,
@@ -176,8 +175,6 @@ bool cpus_share_resources(int this_cpu, int that_cpu);
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);
-#define SDTL_OVERLAP 0x01
-
struct sd_data {
struct sched_domain *__percpu *sd;
struct sched_domain_shared *__percpu *sds;
@@ -188,7 +185,6 @@ struct sd_data {
struct sched_domain_topology_level {
sched_domain_mask_f mask;
sched_domain_flags_f sd_flags;
- int flags;
int numa_level;
struct sd_data data;
char *name;
@@ -197,39 +193,8 @@ struct sched_domain_topology_level {
extern void __init set_sched_topology(struct sched_domain_topology_level *tl);
extern void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio);
-
-# define SD_INIT_NAME(type) .name = #type
-
-#else /* CONFIG_SMP */
-
-struct sched_domain_attr;
-
-static inline void
-partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
- struct sched_domain_attr *dattr_new)
-{
-}
-
-static inline bool cpus_equal_capacity(int this_cpu, int that_cpu)
-{
- return true;
-}
-
-static inline bool cpus_share_cache(int this_cpu, int that_cpu)
-{
- return true;
-}
-
-static inline bool cpus_share_resources(int this_cpu, int that_cpu)
-{
- return true;
-}
-
-static inline void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio)
-{
-}
-
-#endif /* !CONFIG_SMP */
+#define SDTL_INIT(maskfn, flagsfn, dname) ((struct sched_domain_topology_level) \
+ { .mask = maskfn, .sd_flags = flagsfn, .name = #dname })
#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
extern void rebuild_sched_domains_energy(void);
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 5f03a39a26f7..6d0f9c599ff7 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -11,6 +11,8 @@
#include <linux/fs_parser.h>
#include <linux/userfaultfd_k.h>
+struct swap_iocb;
+
/* inode in-kernel data */
#ifdef CONFIG_TMPFS_QUOTA
@@ -107,7 +109,8 @@ static inline bool shmem_mapping(struct address_space *mapping)
void shmem_unlock_mapping(struct address_space *mapping);
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
-int shmem_writeout(struct folio *folio, struct writeback_control *wbc);
+int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
+ struct list_head *folio_list);
void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
int shmem_unuse(unsigned int type);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 5520524c93bf..b8b06e71b73e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3665,7 +3665,13 @@ static inline void *skb_frag_address(const skb_frag_t *frag)
*/
static inline void *skb_frag_address_safe(const skb_frag_t *frag)
{
- void *ptr = page_address(skb_frag_page(frag));
+ struct page *page = skb_frag_page(frag);
+ void *ptr;
+
+ if (!page)
+ return NULL;
+
+ ptr = page_address(page);
if (unlikely(!ptr))
return NULL;
@@ -3873,20 +3879,26 @@ static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int l
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i)
__must_check;
-static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
- const struct page *page, int off)
+static inline bool skb_can_coalesce_netmem(struct sk_buff *skb, int i,
+ netmem_ref netmem, int off)
{
if (skb_zcopy(skb))
return false;
if (i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
- return page == skb_frag_page(frag) &&
+ return netmem == skb_frag_netmem(frag) &&
off == skb_frag_off(frag) + skb_frag_size(frag);
}
return false;
}
+static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
+ const struct page *page, int off)
+{
+ return skb_can_coalesce_netmem(skb, i, page_to_netmem(page), off);
+}
+
static inline int __skb_linearize(struct sk_buff *skb)
{
return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
@@ -5253,7 +5265,7 @@ static inline void skb_mark_for_recycle(struct sk_buff *skb)
}
ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter,
- ssize_t maxsize, gfp_t gfp);
+ ssize_t maxsize);
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
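
A hedged sketch of the netmem-aware coalescing check in a transmit path; skb, i,
netmem, off and copy are assumed from the surrounding context.

	if (skb_can_coalesce_netmem(skb, i, netmem, off)) {
		/* grow the previous fragment rather than adding a new one */
		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
	} else {
		/* start a new fragment at (netmem, off) */
		skb_fill_netmem_desc(skb, i, netmem, off, copy);
	}
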
diff --git a/include/linux/soc/marvell/silicons.h b/include/linux/soc/marvell/silicons.h
new file mode 100644
index 000000000000..66bb9bfaf17d
--- /dev/null
+++ b/include/linux/soc/marvell/silicons.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2024 Marvell.
+ */
+
+#ifndef __SOC_SILICON_H
+#define __SOC_SILICON_H
+
+#include <linux/types.h>
+#include <linux/pci.h>
+
+#if defined(CONFIG_ARM64)
+
+#define CN20K_CHIPID 0x20
+/*
+ * Silicon check for CN20K family
+ */
+static inline bool is_cn20k(struct pci_dev *pdev)
+{
+ return (pdev->subsystem_device & 0xFF) == CN20K_CHIPID;
+}
+#else
+#define is_cn20k(pdev) ((void)(pdev), 0)
+#endif
+
+#endif /* __SOC_SILICON_H */
diff --git a/include/linux/soc/qcom/ubwc.h b/include/linux/soc/qcom/ubwc.h
new file mode 100644
index 000000000000..1ed8b1b16bc9
--- /dev/null
+++ b/include/linux/soc/qcom/ubwc.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018, The Linux Foundation
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef __QCOM_UBWC_H__
+#define __QCOM_UBWC_H__
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+struct qcom_ubwc_cfg_data {
+ u32 ubwc_enc_version;
+ /* Can be read from MDSS_BASE + 0x58 */
+ u32 ubwc_dec_version;
+
+ /**
+ * @ubwc_swizzle: Whether to enable level 1, 2 & 3 bank swizzling.
+ *
+ * UBWC 1.0 always enables all three levels.
+ * UBWC 2.0 removes level 1 bank swizzling, leaving levels 2 & 3.
+ * UBWC 4.0 adds the optional ability to disable levels 2 & 3.
+ */
+ u32 ubwc_swizzle;
+#define UBWC_SWIZZLE_ENABLE_LVL1 BIT(0)
+#define UBWC_SWIZZLE_ENABLE_LVL2 BIT(1)
+#define UBWC_SWIZZLE_ENABLE_LVL3 BIT(2)
+
+ /**
+ * @highest_bank_bit: Highest Bank Bit
+ *
+ * The Highest Bank Bit value represents the bit of the highest
+ * DDR bank. This should ideally use DRAM type detection.
+ */
+ int highest_bank_bit;
+ bool ubwc_bank_spread;
+
+ /**
+ * @macrotile_mode: Macrotile Mode
+ *
+ * Whether to use 4-channel macrotiling mode or the newer
+ * 8-channel macrotiling mode introduced in UBWC 3.1. 0 is
+ * 4-channel and 1 is 8-channel.
+ */
+ bool macrotile_mode;
+};
+
+#define UBWC_1_0 0x10000000
+#define UBWC_2_0 0x20000000
+#define UBWC_3_0 0x30000000
+#define UBWC_4_0 0x40000000
+#define UBWC_4_3 0x40030000
+#define UBWC_5_0 0x50000000
+
+#if IS_ENABLED(CONFIG_QCOM_UBWC_CONFIG)
+const struct qcom_ubwc_cfg_data *qcom_ubwc_config_get_data(void);
+#else
+static inline const struct qcom_ubwc_cfg_data *qcom_ubwc_config_get_data(void)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+#endif
+
+static inline bool qcom_ubwc_get_ubwc_mode(const struct qcom_ubwc_cfg_data *cfg)
+{
+ bool ret = cfg->ubwc_enc_version == UBWC_1_0;
+
+ if (ret && !(cfg->ubwc_swizzle & UBWC_SWIZZLE_ENABLE_LVL1))
+ pr_err("UBWC config discrepancy - level 1 swizzling disabled on UBWC 1.0\n");
+
+ return ret;
+}
+
+#endif /* __QCOM_UBWC_H__ */
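
A consumer-side sketch of the new helper; how the fields are programmed into hardware
is driver-specific and only hinted at here.

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/soc/qcom/ubwc.h>

static int my_gpu_init_ubwc(void)
{
	const struct qcom_ubwc_cfg_data *cfg = qcom_ubwc_config_get_data();

	if (IS_ERR(cfg))
		return PTR_ERR(cfg);

	/* e.g. program highest bank bit / swizzle levels into the block */
	pr_info("UBWC dec %#x, HBB %d, bank spread %d\n",
		cfg->ubwc_dec_version, cfg->highest_bank_bit,
		cfg->ubwc_bank_spread);
	return 0;
}
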
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index c4830dfaff3d..82390712794c 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -424,7 +424,7 @@ bool spi_mem_default_supports_op(struct spi_mem *mem,
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op);
void spi_mem_adjust_op_freq(struct spi_mem *mem, struct spi_mem_op *op);
-u64 spi_mem_calc_op_duration(struct spi_mem_op *op);
+u64 spi_mem_calc_op_duration(struct spi_mem *mem, struct spi_mem_op *op);
bool spi_mem_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op);
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 900b0d5c05f5..f179700fecaf 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -46,11 +46,11 @@ int init_srcu_struct(struct srcu_struct *ssp);
/* Values for SRCU Tree srcu_data ->srcu_reader_flavor, but also used by rcutorture. */
#define SRCU_READ_FLAVOR_NORMAL 0x1 // srcu_read_lock().
#define SRCU_READ_FLAVOR_NMI 0x2 // srcu_read_lock_nmisafe().
-#define SRCU_READ_FLAVOR_LITE 0x4 // srcu_read_lock_lite().
+// 0x4 // SRCU-lite is no longer with us.
#define SRCU_READ_FLAVOR_FAST 0x8 // srcu_read_lock_fast().
#define SRCU_READ_FLAVOR_ALL (SRCU_READ_FLAVOR_NORMAL | SRCU_READ_FLAVOR_NMI | \
- SRCU_READ_FLAVOR_LITE | SRCU_READ_FLAVOR_FAST) // All of the above.
-#define SRCU_READ_FLAVOR_SLOWGP (SRCU_READ_FLAVOR_LITE | SRCU_READ_FLAVOR_FAST)
+ SRCU_READ_FLAVOR_FAST) // All of the above.
+#define SRCU_READ_FLAVOR_SLOWGP SRCU_READ_FLAVOR_FAST
// Flavors requiring synchronize_rcu()
// instead of smp_mb().
void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
@@ -300,33 +300,6 @@ static inline struct srcu_ctr __percpu *srcu_down_read_fast(struct srcu_struct *
}
/**
- * srcu_read_lock_lite - register a new reader for an SRCU-protected structure.
- * @ssp: srcu_struct in which to register the new reader.
- *
- * Enter an SRCU read-side critical section, but for a light-weight
- * smp_mb()-free reader. See srcu_read_lock() for more information.
- *
- * If srcu_read_lock_lite() is ever used on an srcu_struct structure,
- * then none of the other flavors may be used, whether before, during,
- * or after. Note that grace-period auto-expediting is disabled for _lite
- * srcu_struct structures because auto-expedited grace periods invoke
- * synchronize_rcu_expedited(), IPIs and all.
- *
- * Note that srcu_read_lock_lite() can be invoked only from those contexts
- * where RCU is watching, that is, from contexts where it would be legal
- * to invoke rcu_read_lock(). Otherwise, lockdep will complain.
- */
-static inline int srcu_read_lock_lite(struct srcu_struct *ssp) __acquires(ssp)
-{
- int retval;
-
- srcu_check_read_flavor_force(ssp, SRCU_READ_FLAVOR_LITE);
- retval = __srcu_read_lock_lite(ssp);
- rcu_try_lock_acquire(&ssp->dep_map);
- return retval;
-}
-
-/**
* srcu_read_lock_nmisafe - register a new reader for an SRCU-protected structure.
* @ssp: srcu_struct in which to register the new reader.
*
@@ -435,22 +408,6 @@ static inline void srcu_up_read_fast(struct srcu_struct *ssp, struct srcu_ctr __
}
/**
- * srcu_read_unlock_lite - unregister a old reader from an SRCU-protected structure.
- * @ssp: srcu_struct in which to unregister the old reader.
- * @idx: return value from corresponding srcu_read_lock_lite().
- *
- * Exit a light-weight SRCU read-side critical section.
- */
-static inline void srcu_read_unlock_lite(struct srcu_struct *ssp, int idx)
- __releases(ssp)
-{
- WARN_ON_ONCE(idx & ~0x1);
- srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_LITE);
- srcu_lock_release(&ssp->dep_map);
- __srcu_read_unlock_lite(ssp, idx);
-}
-
-/**
* srcu_read_unlock_nmisafe - unregister a old reader from an SRCU-protected structure.
* @ssp: srcu_struct in which to unregister the old reader.
* @idx: return value from corresponding srcu_read_lock_nmisafe().
@@ -524,4 +481,9 @@ DEFINE_LOCK_GUARD_1(srcu, struct srcu_struct,
srcu_read_unlock(_T->lock, _T->idx),
int idx)
+DEFINE_LOCK_GUARD_1(srcu_fast, struct srcu_struct,
+ _T->scp = srcu_read_lock_fast(_T->lock),
+ srcu_read_unlock_fast(_T->lock, _T->scp),
+ struct srcu_ctr __percpu *scp)
+
#endif
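
The new guard makes an SRCU-fast read-side section scope-bound. A minimal sketch,
assuming a statically allocated srcu_struct.

#include <linux/cleanup.h>
#include <linux/srcu.h>

DEFINE_STATIC_SRCU(my_srcu);

static void my_reader(void)
{
	guard(srcu_fast)(&my_srcu);	/* srcu_read_lock_fast() on entry */

	/* read-side critical section; unlock happens at scope exit */
}
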
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index 380260317d98..51ce25f07930 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -93,9 +93,6 @@ static inline void __srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_
__srcu_read_unlock(ssp, __srcu_ptr_to_ctr(ssp, scp));
}
-#define __srcu_read_lock_lite __srcu_read_lock
-#define __srcu_read_unlock_lite __srcu_read_unlock
-
static inline void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
synchronize_srcu(ssp);
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 8bed7e6cc4c1..bf44d8d1e69e 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -278,44 +278,6 @@ static inline void __srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_
RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_unlock_fast().");
}
-/*
- * Counts the new reader in the appropriate per-CPU element of the
- * srcu_struct. Returns an index that must be passed to the matching
- * srcu_read_unlock_lite().
- *
- * Note that this_cpu_inc() is an RCU read-side critical section either
- * because it disables interrupts, because it is a single instruction,
- * or because it is a read-modify-write atomic operation, depending on
- * the whims of the architecture.
- */
-static inline int __srcu_read_lock_lite(struct srcu_struct *ssp)
-{
- struct srcu_ctr __percpu *scp = READ_ONCE(ssp->srcu_ctrp);
-
- RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_lock_lite().");
- this_cpu_inc(scp->srcu_locks.counter); /* Y */
- barrier(); /* Avoid leaking the critical section. */
- return __srcu_ptr_to_ctr(ssp, scp);
-}
-
-/*
- * Removes the count for the old reader from the appropriate
- * per-CPU element of the srcu_struct. Note that this may well be a
- * different CPU than that which was incremented by the corresponding
- * srcu_read_lock_lite(), but it must be within the same task.
- *
- * Note that this_cpu_inc() is an RCU read-side critical section either
- * because it disables interrupts, because it is a single instruction,
- * or because it is a read-modify-write atomic operation, depending on
- * the whims of the architecture.
- */
-static inline void __srcu_read_unlock_lite(struct srcu_struct *ssp, int idx)
-{
- barrier(); /* Avoid leaking the critical section. */
- this_cpu_inc(__srcu_ctr_to_ptr(ssp, idx)->srcu_unlocks.counter); /* Z */
- RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_unlock_lite().");
-}
-
void __srcu_check_read_flavor(struct srcu_struct *ssp, int read_flavor);
// Record reader usage even for CONFIG_PROVE_RCU=n kernels. This is
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index 3132262a404d..72820503514c 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -88,55 +88,73 @@ static inline void print_stop_info(const char *log_lvl, struct task_struct *task
#endif /* CONFIG_SMP */
/*
- * stop_machine "Bogolock": stop the entire machine, disable
- * interrupts. This is a very heavy lock, which is equivalent to
- * grabbing every spinlock (and more). So the "read" side to such a
- * lock is anything which disables preemption.
+ * stop_machine "Bogolock": stop the entire machine, disable interrupts.
+ * This is a very heavy lock, which is equivalent to grabbing every raw
+ * spinlock (and more). So the "read" side to such a lock is anything
+ * which disables preemption.
*/
#if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
/**
* stop_machine: freeze the machine on all CPUs and run this function
* @fn: the function to run
- * @data: the data ptr for the @fn()
- * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
+ * @data: the data ptr to pass to @fn()
+ * @cpus: the cpus to run @fn() on (NULL = run on each online CPU)
*
- * Description: This causes a thread to be scheduled on every cpu,
- * each of which disables interrupts. The result is that no one is
- * holding a spinlock or inside any other preempt-disabled region when
- * @fn() runs.
+ * Description: This causes a thread to be scheduled on every CPU, which
+ * will run with interrupts disabled. Each CPU specified by @cpus will
+ * run @fn. While @fn is executing, there will be no other CPUs holding
+ * a raw spinlock or running within any other type of preempt-disabled
+ * region of code.
*
- * This can be thought of as a very heavy write lock, equivalent to
- * grabbing every spinlock in the kernel.
+ * When @cpus specifies only a single CPU, this can be thought of as
+ * a reader-writer lock where readers disable preemption (for example,
+ * by holding a raw spinlock) and where the insanely heavy writers run
+ * @fn while also preventing any other CPU from doing any useful work.
+ * These writers can also be thought of as having implicitly grabbed every
+ * raw spinlock in the kernel.
*
- * Protects against CPU hotplug.
+ * When @fn is a no-op, this can be thought of as an RCU implementation
+ * where readers again disable preemption and writers use stop_machine()
+ * in place of synchronize_rcu(), albeit with orders of magnitude more
+ * disruption than even that of synchronize_rcu_expedited().
+ *
+ * Although only one stop_machine() operation can proceed at a time,
+ * the possibility of blocking in cpus_read_lock() means that the caller
+ * cannot usefully rely on this serialization.
+ *
+ * Return: 0 if all invocations of @fn return zero. Otherwise, the
+ * value returned by an arbitrarily chosen member of the set of calls to
+ * @fn that returned non-zero.
*/
int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
/**
* stop_machine_cpuslocked: freeze the machine on all CPUs and run this function
* @fn: the function to run
- * @data: the data ptr for the @fn()
- * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
+ * @data: the data ptr to pass to @fn()
+ * @cpus: the cpus to run @fn() on (NULL = run on each online CPU)
+ *
+ * Same as above. Avoids nested calls to cpus_read_lock().
*
- * Same as above. Must be called from with in a cpus_read_lock() protected
- * region. Avoids nested calls to cpus_read_lock().
+ * Context: Must be called from within a cpus_read_lock() protected region.
*/
int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
/**
* stop_core_cpuslocked: - stop all threads on just one core
* @cpu: any cpu in the targeted core
- * @fn: the function to run
- * @data: the data ptr for @fn()
+ * @fn: the function to run on each CPU in the core containing @cpu
+ * @data: the data ptr to pass to @fn()
*
- * Same as above, but instead of every CPU, only the logical CPUs of a
- * single core are affected.
+ * Same as above, but instead of every CPU, only the logical CPUs of the
+ * single core containing @cpu are affected.
*
* Context: Must be called from within a cpus_read_lock() protected region.
*
- * Return: 0 if all executions of @fn returned 0, any non zero return
- * value if any returned non zero.
+ * Return: 0 if all invocations of @fn return zero. Otherwise, the
+ * value returned by an arbitrarily chosen member of the set of calls to
+ * @fn that returned non-zero.
*/
int stop_core_cpuslocked(unsigned int cpu, cpu_stop_fn_t fn, void *data);
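
A minimal caller-side sketch of the documented contract: the callback runs on every
online CPU with interrupts off, and the aggregate return value follows the Return:
rule above. The my_* names are hypothetical.

#include <linux/stop_machine.h>

static int my_quiesce(void *data)
{
	/*
	 * Runs with interrupts disabled; no other CPU is inside a
	 * preempt-disabled region while this executes.
	 */
	return 0;
}

static int my_freeze_all_cpus(void)
{
	return stop_machine(my_quiesce, NULL, NULL);	/* NULL = all online CPUs */
}
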
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 6a3f92098872..317ae31e89b3 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -478,6 +478,7 @@ extern unsigned int lock_system_sleep(void);
extern void unlock_system_sleep(unsigned int);
extern bool pm_sleep_transition_in_progress(void);
+bool pm_hibernate_is_recovering(void);
#else /* !CONFIG_PM_SLEEP */
@@ -511,6 +512,7 @@ static inline unsigned int lock_system_sleep(void) { return 0; }
static inline void unlock_system_sleep(unsigned int flags) {}
static inline bool pm_sleep_transition_in_progress(void) { return false; }
+static inline bool pm_hibernate_is_recovering(void) { return false; }
#endif /* !CONFIG_PM_SLEEP */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index bc0e1c275fc0..2fe6ed2cc3fd 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -376,8 +376,9 @@ extern unsigned long totalreserve_pages;
/* linux/mm/swap.c */
-void lru_note_cost(struct lruvec *lruvec, bool file,
- unsigned int nr_io, unsigned int nr_rotated);
+void lru_note_cost_unlock_irq(struct lruvec *lruvec, bool file,
+ unsigned int nr_io, unsigned int nr_rotated)
+ __releases(lruvec->lru_lock);
void lru_note_cost_refault(struct folio *);
void folio_add_lru(struct folio *);
void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
@@ -415,7 +416,7 @@ extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
#define MIN_SWAPPINESS 0
#define MAX_SWAPPINESS 200
-/* Just recliam from anon folios in proactive memory reclaim */
+/* Just reclaim from anon folios in proactive memory reclaim */
#define SWAPPINESS_ANON_ONLY (MAX_SWAPPINESS + 1)
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
@@ -431,6 +432,22 @@ extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
long remove_mapping(struct address_space *mapping, struct folio *folio);
+#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
+extern int reclaim_register_node(struct node *node);
+extern void reclaim_unregister_node(struct node *node);
+
+#else
+
+static inline int reclaim_register_node(struct node *node)
+{
+ return 0;
+}
+
+static inline void reclaim_unregister_node(struct node *node)
+{
+}
+#endif /* CONFIG_SYSFS && CONFIG_NUMA */
+
#ifdef CONFIG_NUMA
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 40a6ac6c9713..92e9146b1104 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -242,9 +242,7 @@ int do_proc_douintvec(const struct ctl_table *table, int write,
int write, void *data),
void *data);
-extern int pwrsw_enabled;
extern int unaligned_enabled;
-extern int unaligned_dump_stack;
extern int no_unaligned_warning;
#else /* CONFIG_SYSCTL */
@@ -285,7 +283,4 @@ static inline bool sysctl_is_alias(char *param)
}
#endif /* CONFIG_SYSCTL */
-int sysctl_max_threads(const struct ctl_table *table, int write, void *buffer,
- size_t *lenp, loff_t *ppos);
-
#endif /* _LINUX_SYSCTL_H */
diff --git a/include/linux/sysfb.h b/include/linux/sysfb.h
index 07cbab516942..b449665c686a 100644
--- a/include/linux/sysfb.h
+++ b/include/linux/sysfb.h
@@ -7,9 +7,13 @@
* Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com>
*/
-#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/types.h>
+
#include <linux/platform_data/simplefb.h>
+struct device;
+struct platform_device;
struct screen_info;
enum {
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 29f59d50dc73..57e478bfaef2 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -208,7 +208,6 @@ struct tcp_sock {
u32 notsent_lowat; /* TCP_NOTSENT_LOWAT */
u16 gso_segs; /* Max number of segs per GSO packet */
/* from STCP, retrans queue hinting */
- struct sk_buff *lost_skb_hint;
struct sk_buff *retransmit_skb_hint;
__cacheline_group_end(tcp_sock_read_tx);
@@ -419,8 +418,6 @@ struct tcp_sock {
struct tcp_sack_block recv_sack_cache[4];
- int lost_cnt_hint;
-
u32 prior_ssthresh; /* ssthresh saved at recovery start */
u32 high_seq; /* snd_nxt at onset of congestion */
@@ -624,6 +621,7 @@ void tcp_sock_set_nodelay(struct sock *sk);
void tcp_sock_set_quickack(struct sock *sk, int val);
int tcp_sock_set_syncnt(struct sock *sk, int val);
int tcp_sock_set_user_timeout(struct sock *sk, int val);
+int tcp_sock_set_maxseg(struct sock *sk, int val);
static inline bool dst_tcp_usec_ts(const struct dst_entry *dst)
{
diff --git a/include/linux/tnum.h b/include/linux/tnum.h
index 3c13240077b8..57ed3035cc30 100644
--- a/include/linux/tnum.h
+++ b/include/linux/tnum.h
@@ -40,6 +40,8 @@ struct tnum tnum_arshift(struct tnum a, u8 min_shift, u8 insn_bitness);
struct tnum tnum_add(struct tnum a, struct tnum b);
/* Subtract two tnums, return @a - @b */
struct tnum tnum_sub(struct tnum a, struct tnum b);
+/* Negate a tnum, return 0 - @a */
+struct tnum tnum_neg(struct tnum a);
/* Bitwise-AND, return @a & @b */
struct tnum tnum_and(struct tnum a, struct tnum b);
/* Bitwise-OR, return @a | @b */
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 7c06f4795670..1beb5b395d81 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -296,6 +296,8 @@ static inline bool pagefault_disabled(void)
*/
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())
+DEFINE_LOCK_GUARD_0(pagefault, pagefault_disable(), pagefault_enable())
+
#ifndef CONFIG_ARCH_HAS_SUBPAGE_FAULTS
/**
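
The new guard turns a pagefault_disable()/pagefault_enable() pair into a scope. A
minimal sketch of the intended use around the *_inatomic user accessors; the my_*
function is hypothetical.

#include <linux/cleanup.h>
#include <linux/uaccess.h>

static unsigned long my_peek_user(const void __user *src, void *dst, size_t len)
{
	guard(pagefault)();	/* pagefault_disable() now, enable at scope exit */

	/*
	 * Faults are not taken in this scope, so the copy may fail and
	 * return the number of bytes left uncopied.
	 */
	return __copy_from_user_inatomic(dst, src, len);
}
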
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index 2d207cb4837d..4ac082a63173 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -119,6 +119,7 @@ struct cdc_ncm_ctx {
u32 timer_interval;
u32 max_ndp_size;
u8 is_ndp16;
+ u8 filtering_supported;
union {
struct usb_cdc_ncm_ndp16 *delayed_ndp16;
struct usb_cdc_ncm_ndp32 *delayed_ndp32;
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 0b9f1e598e3a..a2d54122823d 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -58,7 +58,7 @@ struct usbnet {
unsigned interrupt_count;
struct mutex interrupt_mutex;
struct usb_anchor deferred;
- struct tasklet_struct bh;
+ struct work_struct bh_work;
struct work_struct kevent;
unsigned long flags;
@@ -76,6 +76,7 @@ struct usbnet {
# define EVENT_LINK_CHANGE 11
# define EVENT_SET_RX_MODE 12
# define EVENT_NO_IP_ALIGN 13
+# define EVENT_LINK_CARRIER_ON 14
/* This one is special, as it indicates that the device is going away
* there are cyclic dependencies between tasklet, timer and bh
* that must be broken
diff --git a/include/linux/usb/uvc.h b/include/linux/usb/uvc.h
index bce95153e5a6..ee19e9f915b8 100644
--- a/include/linux/usb/uvc.h
+++ b/include/linux/usb/uvc.h
@@ -29,6 +29,9 @@
#define UVC_GUID_EXT_GPIO_CONTROLLER \
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x03}
+#define UVC_GUID_MSXU_1_5 \
+ {0xdc, 0x95, 0x3f, 0x0f, 0x32, 0x26, 0x4e, 0x4c, \
+ 0x92, 0xc9, 0xa0, 0x47, 0x82, 0xf4, 0x3b, 0xc8}
#define UVC_GUID_FORMAT_MJPEG \
{ 'M', 'J', 'P', 'G', 0x00, 0x00, 0x10, 0x00, \
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index 75342022d144..c0e716aec26a 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -30,11 +30,7 @@
* from userfaultfd, in order to leave a free define-space for
* shared O_* flags.
*/
-#define UFFD_CLOEXEC O_CLOEXEC
-#define UFFD_NONBLOCK O_NONBLOCK
-
#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
-#define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)
/*
* Start with fault_pending_wqh and fault_wqh so they're more likely
@@ -213,12 +209,12 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma)
}
static inline bool vma_can_userfault(struct vm_area_struct *vma,
- unsigned long vm_flags,
+ vm_flags_t vm_flags,
bool wp_async)
{
vm_flags &= __VM_UFFD_FLAGS;
- if (vm_flags & VM_DROPPABLE)
+ if (vma->vm_flags & VM_DROPPABLE)
return false;
if ((vm_flags & VM_UFFD_MINOR) &&
@@ -263,6 +259,7 @@ extern void mremap_userfaultfd_prep(struct vm_area_struct *,
extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *,
unsigned long from, unsigned long to,
unsigned long len);
+void mremap_userfaultfd_fail(struct vm_userfaultfd_ctx *);
extern bool userfaultfd_remove(struct vm_area_struct *vma,
unsigned long start,
@@ -285,7 +282,7 @@ struct vm_area_struct *userfaultfd_clear_vma(struct vma_iterator *vmi,
int userfaultfd_register_range(struct userfaultfd_ctx *ctx,
struct vm_area_struct *vma,
- unsigned long vm_flags,
+ vm_flags_t vm_flags,
unsigned long start, unsigned long end,
bool wp_async);
@@ -375,6 +372,10 @@ static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx,
{
}
+static inline void mremap_userfaultfd_fail(struct vm_userfaultfd_ctx *ctx)
+{
+}
+
static inline bool userfaultfd_remove(struct vm_area_struct *vma,
unsigned long start,
unsigned long end)
diff --git a/include/linux/usermode_driver.h b/include/linux/usermode_driver.h
deleted file mode 100644
index ad970416260d..000000000000
--- a/include/linux/usermode_driver.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef __LINUX_USERMODE_DRIVER_H__
-#define __LINUX_USERMODE_DRIVER_H__
-
-#include <linux/umh.h>
-#include <linux/path.h>
-
-struct umd_info {
- const char *driver_name;
- struct file *pipe_to_umh;
- struct file *pipe_from_umh;
- struct path wd;
- struct pid *tgid;
-};
-int umd_load_blob(struct umd_info *info, const void *data, size_t len);
-int umd_unload_blob(struct umd_info *info);
-int fork_usermode_driver(struct umd_info *info);
-void umd_cleanup_helper(struct umd_info *info);
-
-#endif /* __LINUX_USERMODE_DRIVER_H__ */
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 64cb4b04be7a..04b90c88d164 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -11,6 +11,7 @@
#include <linux/gfp.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
+#include <linux/virtio_features.h>
/**
* struct virtqueue - a queue to register buffers for sending or receiving.
@@ -141,7 +142,9 @@ struct virtio_admin_cmd {
* @config: the configuration ops for this device.
* @vringh_config: configuration ops for host vrings.
* @vqs: the list of virtqueues for this device.
- * @features: the features supported by both driver and device.
+ * @features: the 64 lower features supported by both driver and device.
+ * @features_array: the full features space supported by both driver and
+ * device.
* @priv: private pointer for the driver's use.
* @debugfs_dir: debugfs directory entry.
* @debugfs_filter_features: features to be filtered set by debugfs.
@@ -159,11 +162,11 @@ struct virtio_device {
const struct virtio_config_ops *config;
const struct vringh_config_ops *vringh_config;
struct list_head vqs;
- u64 features;
+ VIRTIO_DECLARE_FEATURES(features);
void *priv;
#ifdef CONFIG_VIRTIO_DEBUG
struct dentry *debugfs_dir;
- u64 debugfs_filter_features;
+ u64 debugfs_filter_features[VIRTIO_FEATURES_DWORDS];
#endif
};
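
VIRTIO_DECLARE_FEATURES(features) expands to an anonymous union, so code that only needs the low 64 feature bits keeps reading vdev->features unchanged, while extended-feature aware code works on vdev->features_array. A minimal sketch of the two views (not part of the patch; in-tree code should normally go through __virtio_test_bit() instead):

/* Illustrative sketch only, not part of the patch. */
#include <linux/bits.h>
#include <linux/virtio.h>
#include <linux/virtio_features.h>

static bool example_offered(const struct virtio_device *vdev, unsigned int fbit)
{
	/* Legacy view: the union keeps the low 64 bits addressable as a u64. */
	if (fbit < 64)
		return vdev->features & BIT_ULL(fbit);

	/* Extended view: bits 64..VIRTIO_FEATURES_MAX-1 live in the array. */
	return virtio_features_test_bit(vdev->features_array, fbit);
}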
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index b3e1d30c765b..918cf25cd3c6 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -77,7 +77,11 @@ struct virtqueue_info {
* vdev: the virtio_device
* @get_features: get the array of feature bits for this device.
* vdev: the virtio_device
- * Returns the first 64 feature bits (all we currently need).
+ * Returns the first 64 feature bits.
+ * @get_extended_features:
+ * vdev: the virtio_device
+ * Returns the first VIRTIO_FEATURES_MAX feature bits (all we currently
+ * need).
* @finalize_features: confirm what device features we'll be using.
* vdev: the virtio_device
* This sends the driver feature bits to the device: it can change
@@ -121,6 +125,8 @@ struct virtio_config_ops {
void (*del_vqs)(struct virtio_device *);
void (*synchronize_cbs)(struct virtio_device *);
u64 (*get_features)(struct virtio_device *vdev);
+ void (*get_extended_features)(struct virtio_device *vdev,
+ u64 *features);
int (*finalize_features)(struct virtio_device *vdev);
const char *(*bus_name)(struct virtio_device *vdev);
int (*set_vq_affinity)(struct virtqueue *vq,
@@ -147,13 +153,7 @@ void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
static inline bool __virtio_test_bit(const struct virtio_device *vdev,
unsigned int fbit)
{
- /* Did you forget to fix assumptions on max features? */
- if (__builtin_constant_p(fbit))
- BUILD_BUG_ON(fbit >= 64);
- else
- BUG_ON(fbit >= 64);
-
- return vdev->features & BIT_ULL(fbit);
+ return virtio_features_test_bit(vdev->features_array, fbit);
}
/**
@@ -164,13 +164,7 @@ static inline bool __virtio_test_bit(const struct virtio_device *vdev,
static inline void __virtio_set_bit(struct virtio_device *vdev,
unsigned int fbit)
{
- /* Did you forget to fix assumptions on max features? */
- if (__builtin_constant_p(fbit))
- BUILD_BUG_ON(fbit >= 64);
- else
- BUG_ON(fbit >= 64);
-
- vdev->features |= BIT_ULL(fbit);
+ virtio_features_set_bit(vdev->features_array, fbit);
}
/**
@@ -181,13 +175,7 @@ static inline void __virtio_set_bit(struct virtio_device *vdev,
static inline void __virtio_clear_bit(struct virtio_device *vdev,
unsigned int fbit)
{
- /* Did you forget to fix assumptions on max features? */
- if (__builtin_constant_p(fbit))
- BUILD_BUG_ON(fbit >= 64);
- else
- BUG_ON(fbit >= 64);
-
- vdev->features &= ~BIT_ULL(fbit);
+ virtio_features_clear_bit(vdev->features_array, fbit);
}
/**
@@ -204,6 +192,17 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev,
return __virtio_test_bit(vdev, fbit);
}
+static inline void virtio_get_features(struct virtio_device *vdev,
+ u64 *features)
+{
+ if (vdev->config->get_extended_features) {
+ vdev->config->get_extended_features(vdev, features);
+ return;
+ }
+
+ virtio_features_from_u64(features, vdev->config->get_features(vdev));
+}
+
/**
* virtio_has_dma_quirk - determine whether this device has the DMA quirk
* @vdev: the device
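
The new virtio_get_features() helper gives core code a single entry point for both kinds of transports: if ->get_extended_features is implemented it fills the whole feature array, otherwise the legacy ->get_features 64-bit value is widened with virtio_features_from_u64(). A hedged sketch of a caller (not part of the patch):

/* Illustrative sketch only, not part of the patch. */
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_features.h>

static bool example_device_offers(struct virtio_device *vdev, unsigned int fbit)
{
	u64 features[VIRTIO_FEATURES_DWORDS];

	/* Works for transports with or without ->get_extended_features. */
	virtio_get_features(vdev, features);
	return virtio_features_test_bit(features, fbit);
}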
diff --git a/include/linux/virtio_features.h b/include/linux/virtio_features.h
new file mode 100644
index 000000000000..f748f2f87de8
--- /dev/null
+++ b/include/linux/virtio_features.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_VIRTIO_FEATURES_H
+#define _LINUX_VIRTIO_FEATURES_H
+
+#include <linux/bits.h>
+
+#define VIRTIO_FEATURES_DWORDS 2
+#define VIRTIO_FEATURES_MAX (VIRTIO_FEATURES_DWORDS * 64)
+#define VIRTIO_FEATURES_WORDS (VIRTIO_FEATURES_DWORDS * 2)
+#define VIRTIO_BIT(b) BIT_ULL((b) & 0x3f)
+#define VIRTIO_DWORD(b) ((b) >> 6)
+#define VIRTIO_DECLARE_FEATURES(name) \
+ union { \
+ u64 name; \
+ u64 name##_array[VIRTIO_FEATURES_DWORDS];\
+ }
+
+static inline bool virtio_features_chk_bit(unsigned int bit)
+{
+ if (__builtin_constant_p(bit)) {
+ /*
+		 * Don't care about returning the correct value: the build
+		 * will fail before any bad feature access
+ */
+ BUILD_BUG_ON(bit >= VIRTIO_FEATURES_MAX);
+ } else {
+ if (WARN_ON_ONCE(bit >= VIRTIO_FEATURES_MAX))
+ return false;
+ }
+ return true;
+}
+
+static inline bool virtio_features_test_bit(const u64 *features,
+ unsigned int bit)
+{
+ return virtio_features_chk_bit(bit) &&
+ !!(features[VIRTIO_DWORD(bit)] & VIRTIO_BIT(bit));
+}
+
+static inline void virtio_features_set_bit(u64 *features,
+ unsigned int bit)
+{
+ if (virtio_features_chk_bit(bit))
+ features[VIRTIO_DWORD(bit)] |= VIRTIO_BIT(bit);
+}
+
+static inline void virtio_features_clear_bit(u64 *features,
+ unsigned int bit)
+{
+ if (virtio_features_chk_bit(bit))
+ features[VIRTIO_DWORD(bit)] &= ~VIRTIO_BIT(bit);
+}
+
+static inline void virtio_features_zero(u64 *features)
+{
+ memset(features, 0, sizeof(features[0]) * VIRTIO_FEATURES_DWORDS);
+}
+
+static inline void virtio_features_from_u64(u64 *features, u64 from)
+{
+ virtio_features_zero(features);
+ features[0] = from;
+}
+
+static inline bool virtio_features_equal(const u64 *f1, const u64 *f2)
+{
+ int i;
+
+ for (i = 0; i < VIRTIO_FEATURES_DWORDS; ++i)
+ if (f1[i] != f2[i])
+ return false;
+ return true;
+}
+
+static inline void virtio_features_copy(u64 *to, const u64 *from)
+{
+ memcpy(to, from, sizeof(to[0]) * VIRTIO_FEATURES_DWORDS);
+}
+
+static inline void virtio_features_andnot(u64 *to, const u64 *f1, const u64 *f2)
+{
+ int i;
+
+ for (i = 0; i < VIRTIO_FEATURES_DWORDS; i++)
+ to[i] = f1[i] & ~f2[i];
+}
+
+#endif
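
VIRTIO_DWORD() selects which u64 of the array a feature bit lands in and VIRTIO_BIT() gives its position within that word; for example, bit 70 maps to features[1] with mask BIT_ULL(6). A small sketch exercising the helpers (not part of the patch):

/* Illustrative sketch only, not part of the patch. */
#include <linux/printk.h>
#include <linux/virtio_features.h>

static void example_feature_array(void)
{
	u64 features[VIRTIO_FEATURES_DWORDS];

	virtio_features_zero(features);
	virtio_features_set_bit(features, 70);

	/* 70 >> 6 == 1 and 70 & 0x3f == 6, so features[1] == BIT_ULL(6). */
	if (!virtio_features_test_bit(features, 70))
		pr_warn("unexpected: bit 70 not set\n");
}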
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 02a9f4dc594d..20e0584db1dd 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -47,9 +47,9 @@ static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
return 0;
}
-static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
- const struct virtio_net_hdr *hdr,
- bool little_endian)
+static inline int __virtio_net_hdr_to_skb(struct sk_buff *skb,
+ const struct virtio_net_hdr *hdr,
+ bool little_endian, u8 hdr_gso_type)
{
unsigned int nh_min_len = sizeof(struct iphdr);
unsigned int gso_type = 0;
@@ -57,8 +57,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
unsigned int p_off = 0;
unsigned int ip_proto;
- if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
- switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+ if (hdr_gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ switch (hdr_gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_TCPV4:
gso_type = SKB_GSO_TCPV4;
ip_proto = IPPROTO_TCP;
@@ -84,7 +84,7 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
return -EINVAL;
}
- if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
+ if (hdr_gso_type & VIRTIO_NET_HDR_GSO_ECN)
gso_type |= SKB_GSO_TCP_ECN;
if (hdr->gso_size == 0)
@@ -122,7 +122,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
if (!protocol)
virtio_net_hdr_set_proto(skb, hdr);
- else if (!virtio_net_hdr_match_proto(protocol, hdr->gso_type))
+ else if (!virtio_net_hdr_match_proto(protocol,
+ hdr_gso_type))
return -EINVAL;
else
skb->protocol = protocol;
@@ -153,7 +154,7 @@ retry:
}
}
- if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ if (hdr_gso_type != VIRTIO_NET_HDR_GSO_NONE) {
u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size);
unsigned int nh_off = p_off;
struct skb_shared_info *shinfo = skb_shinfo(skb);
@@ -199,6 +200,13 @@ retry:
return 0;
}
+static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
+ const struct virtio_net_hdr *hdr,
+ bool little_endian)
+{
+ return __virtio_net_hdr_to_skb(skb, hdr, little_endian, hdr->gso_type);
+}
+
static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
struct virtio_net_hdr *hdr,
bool little_endian,
@@ -242,4 +250,177 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
return 0;
}
+static inline unsigned int virtio_l3min(bool is_ipv6)
+{
+ return is_ipv6 ? sizeof(struct ipv6hdr) : sizeof(struct iphdr);
+}
+
+static inline int
+virtio_net_hdr_tnl_to_skb(struct sk_buff *skb,
+ const struct virtio_net_hdr_v1_hash_tunnel *vhdr,
+ bool tnl_hdr_negotiated,
+ bool tnl_csum_negotiated,
+ bool little_endian)
+{
+ const struct virtio_net_hdr *hdr = (const struct virtio_net_hdr *)vhdr;
+ unsigned int inner_nh, outer_th, inner_th;
+ unsigned int inner_l3min, outer_l3min;
+ u8 gso_inner_type, gso_tunnel_type;
+ bool outer_isv6, inner_isv6;
+ int ret;
+
+ gso_tunnel_type = hdr->gso_type & VIRTIO_NET_HDR_GSO_UDP_TUNNEL;
+ if (!gso_tunnel_type)
+ return virtio_net_hdr_to_skb(skb, hdr, little_endian);
+
+ /* Tunnel not supported/negotiated, but the hdr asks for it. */
+ if (!tnl_hdr_negotiated)
+ return -EINVAL;
+
+ /* Either ipv4 or ipv6. */
+ if (gso_tunnel_type == VIRTIO_NET_HDR_GSO_UDP_TUNNEL)
+ return -EINVAL;
+
+ /* The UDP tunnel must carry a GSO packet, but no UFO. */
+ gso_inner_type = hdr->gso_type & ~(VIRTIO_NET_HDR_GSO_ECN |
+ VIRTIO_NET_HDR_GSO_UDP_TUNNEL);
+ if (!gso_inner_type || gso_inner_type == VIRTIO_NET_HDR_GSO_UDP)
+ return -EINVAL;
+
+ /* Rely on csum being present. */
+ if (!(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
+ return -EINVAL;
+
+ /* Validate offsets. */
+ outer_isv6 = gso_tunnel_type & VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV6;
+ inner_isv6 = gso_inner_type == VIRTIO_NET_HDR_GSO_TCPV6;
+ inner_l3min = virtio_l3min(inner_isv6);
+ outer_l3min = ETH_HLEN + virtio_l3min(outer_isv6);
+
+ inner_th = __virtio16_to_cpu(little_endian, hdr->csum_start);
+ inner_nh = le16_to_cpu(vhdr->inner_nh_offset);
+ outer_th = le16_to_cpu(vhdr->outer_th_offset);
+ if (outer_th < outer_l3min ||
+ inner_nh < outer_th + sizeof(struct udphdr) ||
+ inner_th < inner_nh + inner_l3min)
+ return -EINVAL;
+
+ /* Let the basic parsing deal with plain GSO features. */
+ ret = __virtio_net_hdr_to_skb(skb, hdr, true,
+ hdr->gso_type & ~gso_tunnel_type);
+ if (ret)
+ return ret;
+
+ /* In case of USO, the inner protocol is still unknown and
+ * `inner_isv6` is just a guess, additional parsing is needed.
+ * The previous validation ensures that accessing an ipv4 inner
+ * network header is safe.
+ */
+ if (gso_inner_type == VIRTIO_NET_HDR_GSO_UDP_L4) {
+ struct iphdr *iphdr = (struct iphdr *)(skb->data + inner_nh);
+
+ inner_isv6 = iphdr->version == 6;
+ inner_l3min = virtio_l3min(inner_isv6);
+ if (inner_th < inner_nh + inner_l3min)
+ return -EINVAL;
+ }
+
+ skb_set_inner_protocol(skb, inner_isv6 ? htons(ETH_P_IPV6) :
+ htons(ETH_P_IP));
+ if (hdr->flags & VIRTIO_NET_HDR_F_UDP_TUNNEL_CSUM) {
+ if (!tnl_csum_negotiated)
+ return -EINVAL;
+
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
+ } else {
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+ }
+
+ skb->inner_transport_header = inner_th + skb_headroom(skb);
+ skb->inner_network_header = inner_nh + skb_headroom(skb);
+ skb->inner_mac_header = inner_nh + skb_headroom(skb);
+ skb->transport_header = outer_th + skb_headroom(skb);
+ skb->encapsulation = 1;
+ return 0;
+}
+
+/* Checksum-related fields validation for the driver */
+static inline int virtio_net_handle_csum_offload(struct sk_buff *skb,
+ struct virtio_net_hdr *hdr,
+ bool tnl_csum_negotiated)
+{
+ if (!(hdr->gso_type & VIRTIO_NET_HDR_GSO_UDP_TUNNEL)) {
+ if (!(hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID))
+ return 0;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (!(hdr->flags & VIRTIO_NET_HDR_F_UDP_TUNNEL_CSUM))
+ return 0;
+
+ /* tunnel csum packets are invalid when the related
+ * feature has not been negotiated
+ */
+ if (!tnl_csum_negotiated)
+ return -EINVAL;
+ skb->csum_level = 1;
+ return 0;
+ }
+
+ /* DATA_VALID is mutually exclusive with NEEDS_CSUM, and GSO
+ * over UDP tunnel requires the latter
+ */
+ if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID)
+ return -EINVAL;
+ return 0;
+}
+
+/*
+ * vlan_hlen always refers to the outermost MAC header. That also
+ * means it refers to the only MAC header, if the packet does not carry
+ * any encapsulation.
+ */
+static inline int
+virtio_net_hdr_tnl_from_skb(const struct sk_buff *skb,
+ struct virtio_net_hdr_v1_hash_tunnel *vhdr,
+ bool tnl_hdr_negotiated,
+ bool little_endian,
+ int vlan_hlen)
+{
+ struct virtio_net_hdr *hdr = (struct virtio_net_hdr *)vhdr;
+ unsigned int inner_nh, outer_th;
+ int tnl_gso_type;
+ int ret;
+
+ tnl_gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL |
+ SKB_GSO_UDP_TUNNEL_CSUM);
+ if (!tnl_gso_type)
+ return virtio_net_hdr_from_skb(skb, hdr, little_endian, false,
+ vlan_hlen);
+
+	/* Tunnel support not negotiated but skb asks for it. */
+ if (!tnl_hdr_negotiated)
+ return -EINVAL;
+
+ /* Let the basic parsing deal with plain GSO features. */
+ skb_shinfo(skb)->gso_type &= ~tnl_gso_type;
+ ret = virtio_net_hdr_from_skb(skb, hdr, true, false, vlan_hlen);
+ skb_shinfo(skb)->gso_type |= tnl_gso_type;
+ if (ret)
+ return ret;
+
+ if (skb->protocol == htons(ETH_P_IPV6))
+ hdr->gso_type |= VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV6;
+ else
+ hdr->gso_type |= VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV4;
+
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
+ hdr->flags |= VIRTIO_NET_HDR_F_UDP_TUNNEL_CSUM;
+
+ inner_nh = skb->inner_network_header - skb_headroom(skb);
+ outer_th = skb->transport_header - skb_headroom(skb);
+ vhdr->inner_nh_offset = cpu_to_le16(inner_nh);
+ vhdr->outer_th_offset = cpu_to_le16(outer_th);
+ return 0;
+}
+
#endif /* _LINUX_VIRTIO_NET_H */
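
virtio_net_hdr_tnl_to_skb() only honours the UDP-tunnel GSO bits when the corresponding features were negotiated, and it validates that the outer transport header, inner network header and inner transport header offsets are laid out in that order before trusting them. A hedged receive-path sketch (not part of the patch; the negotiation booleans and the little-endian assumption are placeholders supplied by the caller):

/* Illustrative sketch only, not part of the patch. */
#include <linux/skbuff.h>
#include <linux/virtio_net.h>

static int example_rx_parse(struct sk_buff *skb,
			    const struct virtio_net_hdr_v1_hash_tunnel *vhdr,
			    bool tnl_hdr_negotiated, bool tnl_csum_negotiated)
{
	/*
	 * Falls back to virtio_net_hdr_to_skb() for non-tunnelled headers;
	 * rejects tunnel GSO types if the feature was not negotiated.
	 */
	return virtio_net_hdr_tnl_to_skb(skb, vhdr, tnl_hdr_negotiated,
					 tnl_csum_negotiated, true);
}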
diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h
index c0b1b1ca1163..48bc12d1045b 100644
--- a/include/linux/virtio_pci_modern.h
+++ b/include/linux/virtio_pci_modern.h
@@ -3,6 +3,7 @@
#define _LINUX_VIRTIO_PCI_MODERN_H
#include <linux/pci.h>
+#include <linux/virtio_config.h>
#include <linux/virtio_pci.h>
/**
@@ -95,10 +96,44 @@ static inline void vp_iowrite64_twopart(u64 val,
vp_iowrite32(val >> 32, hi);
}
-u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev);
-u64 vp_modern_get_driver_features(struct virtio_pci_modern_device *mdev);
-void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
- u64 features);
+void
+vp_modern_get_driver_extended_features(struct virtio_pci_modern_device *mdev,
+ u64 *features);
+void vp_modern_get_extended_features(struct virtio_pci_modern_device *mdev,
+ u64 *features);
+void vp_modern_set_extended_features(struct virtio_pci_modern_device *mdev,
+ const u64 *features);
+
+static inline u64
+vp_modern_get_features(struct virtio_pci_modern_device *mdev)
+{
+ u64 features_array[VIRTIO_FEATURES_DWORDS];
+
+ vp_modern_get_extended_features(mdev, features_array);
+ return features_array[0];
+}
+
+static inline u64
+vp_modern_get_driver_features(struct virtio_pci_modern_device *mdev)
+{
+ u64 features_array[VIRTIO_FEATURES_DWORDS];
+ int i;
+
+ vp_modern_get_driver_extended_features(mdev, features_array);
+ for (i = 1; i < VIRTIO_FEATURES_DWORDS; ++i)
+ WARN_ON_ONCE(features_array[i]);
+ return features_array[0];
+}
+
+static inline void
+vp_modern_set_features(struct virtio_pci_modern_device *mdev, u64 features)
+{
+ u64 features_array[VIRTIO_FEATURES_DWORDS];
+
+ virtio_features_from_u64(features_array, features);
+ vp_modern_set_extended_features(mdev, features_array);
+}
+
u32 vp_modern_generation(struct virtio_pci_modern_device *mdev);
u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev);
void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index b2ccb6845595..c287998908bf 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -507,7 +507,7 @@ static inline const char *lru_list_name(enum lru_list lru)
return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
}
-#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
+#if defined(CONFIG_VM_EVENT_COUNTERS)
static inline const char *vm_event_name(enum vm_event_item item)
{
return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
@@ -516,7 +516,7 @@ static inline const char *vm_event_name(enum vm_event_item item)
NR_VM_STAT_ITEMS +
item];
}
-#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
+#endif /* CONFIG_VM_EVENT_COUNTERS */
#ifdef CONFIG_MEMCG
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 965a19809c7e..09855d819418 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -164,6 +164,8 @@ static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
+extern int add_wait_queue_priority_exclusive(struct wait_queue_head *wq_head,
+ struct wait_queue_entry *wq_entry);
extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 6e30f275da77..45d5dd470ff6 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -6,6 +6,7 @@
#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H
+#include <linux/alloc_tag.h>
#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
@@ -401,6 +402,7 @@ enum wq_flags {
* http://thread.gmane.org/gmane.linux.kernel/1480396
*/
WQ_POWER_EFFICIENT = 1 << 7,
+ WQ_PERCPU = 1 << 8, /* bound to a specific cpu */
__WQ_DESTROYING = 1 << 15, /* internal: workqueue is destroying */
__WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
@@ -427,7 +429,7 @@ enum wq_consts {
/*
* System-wide workqueues which are always present.
*
- * system_wq is the one used by schedule[_delayed]_work[_on]().
+ * system_percpu_wq is the one used by schedule[_delayed]_work[_on]().
* Multi-CPU multi-threaded. There are users which expect relatively
* short queue flush time. Don't queue works which can run for too
* long.
@@ -438,7 +440,7 @@ enum wq_consts {
* system_long_wq is similar to system_wq but may host long running
* works. Queue flushing might take relatively long.
*
- * system_unbound_wq is unbound workqueue. Workers are not bound to
+ * system_dfl_wq is unbound workqueue. Workers are not bound to
* any specific CPU, not concurrency managed, and all queued works are
* executed immediately as long as max_active limit is not reached and
* resources are available.
@@ -455,10 +457,12 @@ enum wq_consts {
* system_bh[_highpri]_wq are convenience interface to softirq. BH work items
* are executed in the queueing CPU's BH context in the queueing order.
*/
-extern struct workqueue_struct *system_wq;
+extern struct workqueue_struct *system_wq; /* use system_percpu_wq, this will be removed */
+extern struct workqueue_struct *system_percpu_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
+extern struct workqueue_struct *system_dfl_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;
@@ -505,7 +509,8 @@ void workqueue_softirq_dead(unsigned int cpu);
* Pointer to the allocated workqueue on success, %NULL on failure.
*/
__printf(1, 4) struct workqueue_struct *
-alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
+alloc_workqueue_noprof(const char *fmt, unsigned int flags, int max_active, ...);
+#define alloc_workqueue(...) alloc_hooks(alloc_workqueue_noprof(__VA_ARGS__))
#ifdef CONFIG_LOCKDEP
/**
@@ -544,8 +549,8 @@ alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active,
* Pointer to the allocated workqueue on success, %NULL on failure.
*/
#define alloc_ordered_workqueue_lockdep_map(fmt, flags, lockdep_map, args...) \
- alloc_workqueue_lockdep_map(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), \
- 1, lockdep_map, ##args)
+ alloc_hooks(alloc_workqueue_lockdep_map(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags),\
+ 1, lockdep_map, ##args))
#endif
/**
@@ -577,7 +582,9 @@ alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active,
extern void destroy_workqueue(struct workqueue_struct *wq);
-struct workqueue_attrs *alloc_workqueue_attrs(void);
+struct workqueue_attrs *alloc_workqueue_attrs_noprof(void);
+#define alloc_workqueue_attrs(...) alloc_hooks(alloc_workqueue_attrs_noprof(__VA_ARGS__))
+
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
const struct workqueue_attrs *attrs);
@@ -840,19 +847,6 @@ long work_on_cpu_key(int cpu, long (*fn)(void *),
work_on_cpu_key(_cpu, _fn, _arg, &__key); \
})
-long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
- void *arg, struct lock_class_key *key);
-
-/*
- * A new key is defined for each caller to make sure the work
- * associated with the function doesn't share its locking class.
- */
-#define work_on_cpu_safe(_cpu, _fn, _arg) \
-({ \
- static struct lock_class_key __key; \
- \
- work_on_cpu_safe_key(_cpu, _fn, _arg, &__key); \
-})
#endif /* CONFIG_SMP */
#ifdef CONFIG_FREEZER
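
system_percpu_wq and system_dfl_wq become the preferred names for the per-CPU and unbound system workqueues, WQ_PERCPU lets callers state the per-CPU binding explicitly, and alloc_workqueue()/alloc_workqueue_attrs() are now alloc_hooks() wrappers so allocations get attributed to their call sites when memory allocation profiling is enabled. A hedged sketch of a caller (not part of the patch; the queue name is made up):

/* Illustrative sketch only, not part of the patch. */
#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int example_init(void)
{
	/* Explicitly per-CPU; the macro expands through alloc_hooks(). */
	example_wq = alloc_workqueue("example_wq", WQ_PERCPU, 0);
	return example_wq ? 0 : -ENOMEM;
}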
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index eda4b62511f7..a2848d731a46 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -59,7 +59,6 @@ struct writeback_control {
unsigned for_kupdate:1; /* A kupdate writeback */
unsigned for_background:1; /* A background writeback */
unsigned tagged_writepages:1; /* tag-and-write to avoid livelock */
- unsigned for_reclaim:1; /* Invoked from the page allocator */
unsigned range_cyclic:1; /* range_start is cyclic */
unsigned for_sync:1; /* sync(2) WB_SYNC_ALL writeback */
unsigned unpinned_netfs_wb:1; /* Cleared I_PINNING_NETFS_WB */
@@ -72,16 +71,6 @@ struct writeback_control {
*/
unsigned no_cgroup_owner:1;
- /* To enable batching of swap writes to non-block-device backends,
- * "plug" can be set point to a 'struct swap_iocb *'. When all swap
- * writes have been submitted, if with swap_iocb is not NULL,
- * swap_write_unplug() should be called.
- */
- struct swap_iocb **swap_plug;
-
- /* Target list for splitting a large folio */
- struct list_head *list;
-
/* internal fields used by the ->writepages implementation: */
struct folio_batch fbatch;
pgoff_t index;
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 13e9cc5490f7..f3ccff2d966c 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -46,4 +46,6 @@ void zs_obj_read_end(struct zs_pool *pool, unsigned long handle,
void zs_obj_write(struct zs_pool *pool, unsigned long handle,
void *handle_mem, size_t mem_len);
+extern const struct movable_operations zsmalloc_mops;
+
#endif
diff --git a/include/media/rcar-fcp.h b/include/media/rcar-fcp.h
index 179240fb163b..6ac9be9f675e 100644
--- a/include/media/rcar-fcp.h
+++ b/include/media/rcar-fcp.h
@@ -18,6 +18,7 @@ void rcar_fcp_put(struct rcar_fcp_device *fcp);
struct device *rcar_fcp_get_device(struct rcar_fcp_device *fcp);
int rcar_fcp_enable(struct rcar_fcp_device *fcp);
void rcar_fcp_disable(struct rcar_fcp_device *fcp);
+int rcar_fcp_soft_reset(struct rcar_fcp_device *fcp);
#else
static inline struct rcar_fcp_device *rcar_fcp_get(const struct device_node *np)
{
@@ -33,6 +34,10 @@ static inline int rcar_fcp_enable(struct rcar_fcp_device *fcp)
return 0;
}
static inline void rcar_fcp_disable(struct rcar_fcp_device *fcp) { }
+static inline int rcar_fcp_soft_reset(struct rcar_fcp_device *fcp)
+{
+ return 0;
+}
#endif
#endif /* __MEDIA_RCAR_FCP_H__ */
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 3a87096e064f..c32c46286441 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -579,8 +579,10 @@ int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl,
* @hdl: The control handler.
*
* Does nothing if @hdl == NULL.
+ *
+ * Return: @hdl's error field or 0 if @hdl is NULL.
*/
-void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl);
+int v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl);
/**
* v4l2_ctrl_lock() - Helper function to lock the handler
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index 1b6222fab24e..a69801274800 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -313,10 +313,16 @@ struct video_device {
* media_entity_to_video_device - Returns a &struct video_device from
* the &struct media_entity embedded on it.
*
- * @__entity: pointer to &struct media_entity
- */
-#define media_entity_to_video_device(__entity) \
- container_of(__entity, struct video_device, entity)
+ * @__entity: pointer to &struct media_entity, may be NULL
+ */
+#define media_entity_to_video_device(__entity) \
+({ \
+ typeof(__entity) __me_vdev_ent = __entity; \
+ \
+ __me_vdev_ent ? \
+ container_of(__me_vdev_ent, struct video_device, entity) : \
+ NULL; \
+})
/**
* to_video_device - Returns a &struct video_device from the
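
With the reworked macro, media_entity_to_video_device() can be handed a possibly-NULL entity pointer, for example straight from a lookup, and yields NULL instead of a bogus container_of() result. A minimal sketch (not part of the patch):

/* Illustrative sketch only, not part of the patch. */
#include <media/media-entity.h>
#include <media/v4l2-dev.h>

static struct video_device *example_entity_to_vdev(struct media_entity *entity)
{
	/* Safe even when entity is NULL (e.g. a failed lookup). */
	return media_entity_to_video_device(entity);
}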
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index c6ec87e88dfe..82695c3a300a 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -679,6 +679,7 @@ long int v4l2_compat_ioctl32(struct file *file, unsigned int cmd,
#endif
unsigned int v4l2_compat_translate_cmd(unsigned int cmd);
+unsigned int v4l2_translate_cmd(unsigned int cmd);
int v4l2_compat_get_user(void __user *arg, void *parg, unsigned int cmd);
int v4l2_compat_put_user(void __user *arg, void *parg, unsigned int cmd);
int v4l2_compat_get_array_args(struct file *file, void *mbuf,
diff --git a/include/media/v4l2-jpeg.h b/include/media/v4l2-jpeg.h
index b65658a02e3c..62dda1560275 100644
--- a/include/media/v4l2-jpeg.h
+++ b/include/media/v4l2-jpeg.h
@@ -169,15 +169,6 @@ struct v4l2_jpeg_header {
int v4l2_jpeg_parse_header(void *buf, size_t len, struct v4l2_jpeg_header *out);
-int v4l2_jpeg_parse_frame_header(void *buf, size_t len,
- struct v4l2_jpeg_frame_header *frame_header);
-int v4l2_jpeg_parse_scan_header(void *buf, size_t len,
- struct v4l2_jpeg_scan_header *scan_header);
-int v4l2_jpeg_parse_quantization_tables(void *buf, size_t len, u8 precision,
- struct v4l2_jpeg_reference *q_tables);
-int v4l2_jpeg_parse_huffman_tables(void *buf, size_t len,
- struct v4l2_jpeg_reference *huffman_tables);
-
extern const u8 v4l2_jpeg_zigzag_scan_index[V4L2_JPEG_PIXELS_IN_BLOCK];
extern const u8 v4l2_jpeg_ref_table_luma_qt[V4L2_JPEG_PIXELS_IN_BLOCK];
extern const u8 v4l2_jpeg_ref_table_chroma_qt[V4L2_JPEG_PIXELS_IN_BLOCK];
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 57f2bcb4eb16..5dcf4065708f 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -460,8 +460,6 @@ enum v4l2_subdev_pre_streamon_flags {
* but use the v4l2_subdev_enable_streams() and
* v4l2_subdev_disable_streams() helpers.
*
- * @g_pixelaspect: callback to return the pixelaspect ratio.
- *
* @s_rx_buffer: set a host allocated memory buffer for the subdev. The subdev
* can adjust @size to a lower value and must not write more data to the
* buffer starting at @data than the original value of @size.
@@ -491,7 +489,6 @@ struct v4l2_subdev_video_ops {
int (*g_tvnorms_output)(struct v4l2_subdev *sd, v4l2_std_id *std);
int (*g_input_status)(struct v4l2_subdev *sd, u32 *status);
int (*s_stream)(struct v4l2_subdev *sd, int enable);
- int (*g_pixelaspect)(struct v4l2_subdev *sd, struct v4l2_fract *aspect);
int (*s_rx_buffer)(struct v4l2_subdev *sd, void *buf,
unsigned int *size);
int (*pre_streamon)(struct v4l2_subdev *sd, u32 flags);
diff --git a/include/media/vsp1.h b/include/media/vsp1.h
index 4ea6352fd63f..d9b91ff02761 100644
--- a/include/media/vsp1.h
+++ b/include/media/vsp1.h
@@ -14,6 +14,11 @@
#include <linux/videodev2.h>
struct device;
+struct vsp1_dl_list;
+
+/* -----------------------------------------------------------------------------
+ * VSP1 DU interface
+ */
int vsp1_du_init(struct device *dev);
@@ -121,4 +126,88 @@ void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index,
int vsp1_du_map_sg(struct device *dev, struct sg_table *sgt);
void vsp1_du_unmap_sg(struct device *dev, struct sg_table *sgt);
+/* -----------------------------------------------------------------------------
+ * VSP1 ISP interface
+ */
+
+/**
+ * struct vsp1_isp_buffer_desc - Describe a buffer allocated by VSPX
+ * @size: Byte size of the buffer allocated by VSPX
+ * @cpu_addr: CPU-mapped address of a buffer allocated by VSPX
+ * @dma_addr: bus address of a buffer allocated by VSPX
+ */
+struct vsp1_isp_buffer_desc {
+ size_t size;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+};
+
+/**
+ * struct vsp1_isp_job_desc - Describe a VSPX buffer transfer request
+ * @config: ConfigDMA buffer descriptor
+ * @config.pairs: number of reg-value pairs in the ConfigDMA buffer
+ * @config.mem: bus address of the ConfigDMA buffer
+ * @img: RAW image buffer descriptor
+ * @img.fmt: RAW image format
+ * @img.mem: bus address of the RAW image buffer
+ * @dl: pointer to the display list populated by the VSPX driver in the
+ * vsp1_isp_job_prepare() function
+ *
+ * Describe a transfer request for the VSPX to perform on behalf of the ISP.
+ * The job descriptor contains an optional ConfigDMA buffer and one RAW image
+ * buffer. Set config.pairs to 0 if no ConfigDMA buffer should be transferred.
+ * The minimum number of config.pairs that can be written using ConfigDMA is 17.
+ * A number of pairs < 16 corrupts the output image. A number of pairs == 16
+ * freezes the VSPX operation. If the ISP driver has to write less than 17 pairs
+ * it shall pad the buffer with writes directed to registers that have no effect
+ * or avoid using ConfigDMA at all for such small write sequences.
+ *
+ * The ISP driver shall pass an instance of this type to the vsp1_isp_job_prepare()
+ * function that will populate the display list pointer @dl using the @config
+ * and @img descriptors. When the job has to be run on the VSPX, the descriptor
+ * shall be passed to vsp1_isp_job_run() which consumes the display list.
+ *
+ * Job descriptors not yet run shall be released with a call to
+ * vsp1_isp_job_release() when stopping the streaming in order to properly
+ * release the resources acquired by vsp1_isp_job_prepare().
+ */
+struct vsp1_isp_job_desc {
+ struct {
+ unsigned int pairs;
+ dma_addr_t mem;
+ } config;
+ struct {
+ struct v4l2_pix_format_mplane fmt;
+ dma_addr_t mem;
+ } img;
+ struct vsp1_dl_list *dl;
+};
+
+/**
+ * struct vsp1_vspx_frame_end - VSPX frame end callback data
+ * @vspx_frame_end: Frame end callback. Called after a transfer job has been
+ * completed. If the job includes both a ConfigDMA and a
+ * RAW image, the callback is called after both have been
+ * transferred
+ * @frame_end_data: Frame end callback data, passed to vspx_frame_end
+ */
+struct vsp1_vspx_frame_end {
+ void (*vspx_frame_end)(void *data);
+ void *frame_end_data;
+};
+
+int vsp1_isp_init(struct device *dev);
+struct device *vsp1_isp_get_bus_master(struct device *dev);
+int vsp1_isp_alloc_buffer(struct device *dev, size_t size,
+ struct vsp1_isp_buffer_desc *buffer_desc);
+void vsp1_isp_free_buffer(struct device *dev,
+ struct vsp1_isp_buffer_desc *buffer_desc);
+int vsp1_isp_start_streaming(struct device *dev,
+ struct vsp1_vspx_frame_end *frame_end);
+void vsp1_isp_stop_streaming(struct device *dev);
+int vsp1_isp_job_prepare(struct device *dev,
+ struct vsp1_isp_job_desc *job);
+int vsp1_isp_job_run(struct device *dev, struct vsp1_isp_job_desc *job);
+void vsp1_isp_job_release(struct device *dev, struct vsp1_isp_job_desc *job);
+
#endif /* __MEDIA_VSP1_H__ */
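
Per the kernel-doc above, a VSPX transfer is a two-step operation: vsp1_isp_job_prepare() turns the config/image descriptors into a display list, and vsp1_isp_job_run() consumes that list; jobs that were prepared but never run must be dropped with vsp1_isp_job_release() when streaming stops. A hedged sketch of the happy path (not part of the patch; error-handling policy beyond the return codes is an assumption):

/* Illustrative sketch only, not part of the patch. */
#include <linux/device.h>
#include <media/vsp1.h>

static int example_vspx_transfer(struct device *vspx_dev,
				 struct vsp1_isp_job_desc *job)
{
	int ret;

	/* Populates job->dl from job->config and job->img. */
	ret = vsp1_isp_job_prepare(vspx_dev, job);
	if (ret)
		return ret;

	/* Consumes the display list prepared above. */
	return vsp1_isp_job_run(vspx_dev, job);
}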
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 404df8557f6a..2894cfff2da3 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -76,19 +76,24 @@ static inline void tcf_lastuse_update(struct tcf_t *tm)
{
unsigned long now = jiffies;
- if (tm->lastuse != now)
- tm->lastuse = now;
- if (unlikely(!tm->firstuse))
- tm->firstuse = now;
+ if (READ_ONCE(tm->lastuse) != now)
+ WRITE_ONCE(tm->lastuse, now);
+ if (unlikely(!READ_ONCE(tm->firstuse)))
+ WRITE_ONCE(tm->firstuse, now);
}
static inline void tcf_tm_dump(struct tcf_t *dtm, const struct tcf_t *stm)
{
- dtm->install = jiffies_to_clock_t(jiffies - stm->install);
- dtm->lastuse = jiffies_to_clock_t(jiffies - stm->lastuse);
- dtm->firstuse = stm->firstuse ?
- jiffies_to_clock_t(jiffies - stm->firstuse) : 0;
- dtm->expires = jiffies_to_clock_t(stm->expires);
+ unsigned long firstuse, now = jiffies;
+
+ dtm->install = jiffies_to_clock_t(now - READ_ONCE(stm->install));
+ dtm->lastuse = jiffies_to_clock_t(now - READ_ONCE(stm->lastuse));
+
+ firstuse = READ_ONCE(stm->firstuse);
+ dtm->firstuse = firstuse ?
+ jiffies_to_clock_t(now - firstuse) : 0;
+
+ dtm->expires = jiffies_to_clock_t(READ_ONCE(stm->expires));
}
static inline enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats)
@@ -170,14 +175,12 @@ static inline void tc_action_net_exit(struct list_head *net_list,
{
struct net *net;
- rtnl_lock();
list_for_each_entry(net, net_list, exit_list) {
struct tc_action_net *tn = net_generic(net, id);
tcf_idrinfo_destroy(tn->ops, tn->idrinfo);
kfree(tn->idrinfo);
}
- rtnl_unlock();
}
int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
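
The tcf_lastuse_update()/tcf_tm_dump() change is the usual KCSAN-friendly pattern: timestamp fields that are read and written locklessly from several contexts are accessed through READ_ONCE()/WRITE_ONCE() so the race stays benign and documented. A generic sketch of the same pattern (not part of the patch):

/* Illustrative sketch only, not part of the patch. */
#include <linux/compiler.h>
#include <linux/jiffies.h>

struct example_times {
	unsigned long lastuse;
};

static void example_touch(struct example_times *t)
{
	unsigned long now = jiffies;

	/* Tear-free, lockless update; readers use READ_ONCE() likewise. */
	if (READ_ONCE(t->lastuse) != now)
		WRITE_ONCE(t->lastuse, now);
}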
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 1af1841b7601..34f53dde65ce 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -47,6 +47,8 @@ struct unix_sock {
#define peer_wait peer_wq.wait
wait_queue_entry_t peer_wake;
struct scm_stat scm_stat;
+ int inq_len;
+ bool recvmsg_inq;
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
struct sk_buff *oob_skb;
#endif
diff --git a/include/net/aligned_data.h b/include/net/aligned_data.h
new file mode 100644
index 000000000000..e1a1c8aedc79
--- /dev/null
+++ b/include/net/aligned_data.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _NET_ALIGNED_DATA_H
+#define _NET_ALIGNED_DATA_H
+
+#include <linux/atomic.h>
+#include <linux/types.h>
+
+/* Structure holding cacheline aligned fields on SMP builds.
+ * Each field or group should have an ____cacheline_aligned_in_smp
+ * attribute to ensure no accidental false sharing can happen.
+ */
+struct net_aligned_data {
+ atomic64_t net_cookie ____cacheline_aligned_in_smp;
+#if defined(CONFIG_INET)
+ atomic_long_t tcp_memory_allocated ____cacheline_aligned_in_smp;
+ atomic_long_t udp_memory_allocated ____cacheline_aligned_in_smp;
+#endif
+};
+
+extern struct net_aligned_data net_aligned_data;
+
+#endif /* _NET_ALIGNED_DATA_H */
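
net_aligned_data groups a few hot, globally shared counters and gives each one its own cacheline so unrelated writers do not false-share. A minimal sketch of a reader (not part of the patch; assumes CONFIG_INET for the TCP counter):

/* Illustrative sketch only, not part of the patch. */
#include <net/aligned_data.h>

static long example_tcp_pages_allocated(void)
{
	/* Each field sits on its own SMP cacheline. */
	return atomic_long_read(&net_aligned_data.tcp_memory_allocated);
}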
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 114299bd8b98..ada5b56a4413 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -244,6 +244,12 @@ struct bt_codecs {
#define BT_ISO_BASE 20
+/* Socket option value 21 reserved */
+
+#define BT_PKT_SEQNUM 22
+
+#define BT_SCM_PKT_SEQNUM 0x05
+
__printf(1, 2)
void bt_info(const char *fmt, ...);
__printf(1, 2)
@@ -391,7 +397,8 @@ struct bt_sock {
enum {
BT_SK_DEFER_SETUP,
BT_SK_SUSPEND,
- BT_SK_PKT_STATUS
+ BT_SK_PKT_STATUS,
+ BT_SK_PKT_SEQNUM,
};
struct bt_sock_list {
@@ -475,6 +482,7 @@ struct bt_skb_cb {
u8 pkt_type;
u8 force_active;
u16 expect;
+ u16 pkt_seqnum;
u8 incoming:1;
u8 pkt_status:2;
union {
@@ -488,6 +496,7 @@ struct bt_skb_cb {
#define hci_skb_pkt_type(skb) bt_cb((skb))->pkt_type
#define hci_skb_pkt_status(skb) bt_cb((skb))->pkt_status
+#define hci_skb_pkt_seqnum(skb) bt_cb((skb))->pkt_seqnum
#define hci_skb_expect(skb) bt_cb((skb))->expect
#define hci_skb_opcode(skb) bt_cb((skb))->hci.opcode
#define hci_skb_event(skb) bt_cb((skb))->hci.req_event
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index c79901f2dc2a..df1847b74e55 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -562,6 +562,7 @@ enum {
#define LE_LINK 0x80
#define CIS_LINK 0x82
#define BIS_LINK 0x83
+#define PA_LINK 0x84
#define INVALID_LINK 0xff
/* LMP features */
@@ -2634,6 +2635,7 @@ struct hci_ev_le_conn_complete {
#define LE_EXT_ADV_DIRECT_IND 0x0004
#define LE_EXT_ADV_SCAN_RSP 0x0008
#define LE_EXT_ADV_LEGACY_PDU 0x0010
+#define LE_EXT_ADV_DATA_STATUS_MASK 0x0060
#define LE_EXT_ADV_EVT_TYPE_MASK 0x007f
#define ADDR_LE_DEV_PUBLIC 0x00
@@ -2837,7 +2839,7 @@ struct hci_evt_le_create_big_complete {
} __packed;
#define HCI_EVT_LE_BIG_SYNC_ESTABLISHED 0x1d
-struct hci_evt_le_big_sync_estabilished {
+struct hci_evt_le_big_sync_established {
__u8 status;
__u8 handle;
__u8 latency[3];
@@ -2851,6 +2853,12 @@ struct hci_evt_le_big_sync_estabilished {
__le16 bis[];
} __packed;
+#define HCI_EVT_LE_BIG_SYNC_LOST 0x1e
+struct hci_evt_le_big_sync_lost {
+ __u8 handle;
+ __u8 reason;
+} __packed;
+
#define HCI_EVT_LE_BIG_INFO_ADV_REPORT 0x22
struct hci_evt_le_big_info_adv_report {
__le16 sync_handle;
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index f79f59e67114..4dc11c66f7b8 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -29,6 +29,7 @@
#include <linux/idr.h>
#include <linux/leds.h>
#include <linux/rculist.h>
+#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <net/bluetooth/hci.h>
@@ -94,6 +95,7 @@ struct discovery_state {
u16 uuid_count;
u8 (*uuids)[16];
unsigned long name_resolve_timeout;
+ spinlock_t lock;
};
#define SUSPEND_NOTIFIER_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
@@ -889,6 +891,7 @@ static inline void iso_recv(struct hci_conn *hcon, struct sk_buff *skb,
static inline void discovery_init(struct hci_dev *hdev)
{
+ spin_lock_init(&hdev->discovery.lock);
hdev->discovery.state = DISCOVERY_STOPPED;
INIT_LIST_HEAD(&hdev->discovery.all);
INIT_LIST_HEAD(&hdev->discovery.unknown);
@@ -903,8 +906,11 @@ static inline void hci_discovery_filter_clear(struct hci_dev *hdev)
hdev->discovery.report_invalid_rssi = true;
hdev->discovery.rssi = HCI_RSSI_INVALID;
hdev->discovery.uuid_count = 0;
+
+ spin_lock(&hdev->discovery.lock);
kfree(hdev->discovery.uuids);
hdev->discovery.uuids = NULL;
+ spin_unlock(&hdev->discovery.lock);
}
bool hci_discovery_active(struct hci_dev *hdev);
@@ -1009,6 +1015,7 @@ static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
break;
case CIS_LINK:
case BIS_LINK:
+ case PA_LINK:
h->iso_num++;
break;
}
@@ -1036,6 +1043,7 @@ static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
break;
case CIS_LINK:
case BIS_LINK:
+ case PA_LINK:
h->iso_num--;
break;
}
@@ -1054,6 +1062,7 @@ static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type)
return h->sco_num;
case CIS_LINK:
case BIS_LINK:
+ case PA_LINK:
return h->iso_num;
default:
return 0;
@@ -1136,7 +1145,7 @@ hci_conn_hash_lookup_create_pa_sync(struct hci_dev *hdev)
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if (c->type != BIS_LINK)
+ if (c->type != PA_LINK)
continue;
if (!test_bit(HCI_CONN_CREATE_PA_SYNC, &c->flags))
@@ -1331,7 +1340,7 @@ hci_conn_hash_lookup_big_sync_pend(struct hci_dev *hdev,
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if (c->type != BIS_LINK)
+ if (c->type != PA_LINK)
continue;
if (handle == c->iso_qos.bcast.big && num_bis == c->num_bis) {
@@ -1346,7 +1355,8 @@ hci_conn_hash_lookup_big_sync_pend(struct hci_dev *hdev,
}
static inline struct hci_conn *
-hci_conn_hash_lookup_big_state(struct hci_dev *hdev, __u8 handle, __u16 state)
+hci_conn_hash_lookup_big_state(struct hci_dev *hdev, __u8 handle, __u16 state,
+ __u8 role)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *c;
@@ -1354,7 +1364,7 @@ hci_conn_hash_lookup_big_state(struct hci_dev *hdev, __u8 handle, __u16 state)
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if (c->type != BIS_LINK || c->state != state)
+ if (c->type != BIS_LINK || c->state != state || c->role != role)
continue;
if (handle == c->iso_qos.bcast.big) {
@@ -1400,7 +1410,7 @@ hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle)
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if (c->type != BIS_LINK)
+ if (c->type != PA_LINK)
continue;
/* Ignore the listen hcon, we are looking
@@ -1420,26 +1430,6 @@ hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle)
return NULL;
}
-static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
- __u8 type, __u16 state)
-{
- struct hci_conn_hash *h = &hdev->conn_hash;
- struct hci_conn *c;
-
- rcu_read_lock();
-
- list_for_each_entry_rcu(c, &h->list, list) {
- if (c->type == type && c->state == state) {
- rcu_read_unlock();
- return c;
- }
- }
-
- rcu_read_unlock();
-
- return NULL;
-}
-
typedef void (*hci_conn_func_t)(struct hci_conn *conn, void *data);
static inline void hci_conn_hash_list_state(struct hci_dev *hdev,
hci_conn_func_t func, __u8 type,
@@ -2019,6 +2009,7 @@ static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
case CIS_LINK:
case BIS_LINK:
+ case PA_LINK:
return iso_connect_ind(hdev, bdaddr, flags);
default:
diff --git a/include/net/bond_options.h b/include/net/bond_options.h
index 18687ccf0638..022b122a9fb6 100644
--- a/include/net/bond_options.h
+++ b/include/net/bond_options.h
@@ -77,6 +77,7 @@ enum {
BOND_OPT_NS_TARGETS,
BOND_OPT_PRIO,
BOND_OPT_COUPLED_CONTROL,
+ BOND_OPT_BROADCAST_NEIGH,
BOND_OPT_LAST
};
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 95f67b308c19..e06f0d63b2c1 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -115,6 +115,8 @@ static inline int is_netpoll_tx_blocked(struct net_device *dev)
#define is_netpoll_tx_blocked(dev) (0)
#endif
+DECLARE_STATIC_KEY_FALSE(bond_bcast_neigh_enabled);
+
struct bond_params {
int mode;
int xmit_policy;
@@ -149,6 +151,7 @@ struct bond_params {
struct in6_addr ns_targets[BOND_MAX_NS_TARGETS];
#endif
int coupled_control;
+ int broadcast_neighbor;
/* 2 bytes of padding : see ether_addr_equal_64bits() */
u8 ad_actor_system[ETH_ALEN + 2];
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 10248d527616..406626ff6cc8 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -560,7 +560,7 @@ struct ieee80211_sta_s1g_cap {
* @vht_cap: VHT capabilities in this band
* @s1g_cap: S1G capabilities in this band
* @edmg_cap: EDMG capabilities in this band
- * @s1g_cap: S1G capabilities in this band (S1B band only, of course)
+ * @s1g_cap: S1G capabilities in this band (S1G band only, of course)
* @n_iftype_data: number of iftype data entries
* @iftype_data: interface type data entries. Note that the bits in
* @types_mask inside this structure cannot overlap (i.e. only
@@ -633,7 +633,7 @@ ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband,
const struct ieee80211_sband_iftype_data *data;
int i;
- if (WARN_ON(iftype >= NL80211_IFTYPE_MAX))
+ if (WARN_ON(iftype >= NUM_NL80211_IFTYPES))
return NULL;
if (iftype == NL80211_IFTYPE_AP_VLAN)
@@ -1424,6 +1424,23 @@ struct cfg80211_unsol_bcast_probe_resp {
};
/**
+ * struct cfg80211_s1g_short_beacon - S1G short beacon data.
+ *
+ * @update: Set to true if the feature configuration should be updated.
+ * @short_head: Short beacon head.
+ * @short_tail: Short beacon tail.
+ * @short_head_len: Short beacon head len.
+ * @short_tail_len: Short beacon tail len.
+ */
+struct cfg80211_s1g_short_beacon {
+ bool update;
+ const u8 *short_head;
+ const u8 *short_tail;
+ size_t short_head_len;
+ size_t short_tail_len;
+};
+
+/**
* struct cfg80211_ap_settings - AP configuration
*
* Used to configure an AP interface.
@@ -1463,6 +1480,8 @@ struct cfg80211_unsol_bcast_probe_resp {
* @fils_discovery: FILS discovery transmission parameters
* @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
* @mbssid_config: AP settings for multiple bssid
+ * @s1g_long_beacon_period: S1G long beacon period
+ * @s1g_short_beacon: S1G short beacon data
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
@@ -1496,6 +1515,8 @@ struct cfg80211_ap_settings {
struct cfg80211_fils_discovery fils_discovery;
struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
struct cfg80211_mbssid_config mbssid_config;
+ u8 s1g_long_beacon_period;
+ struct cfg80211_s1g_short_beacon s1g_short_beacon;
};
@@ -1507,11 +1528,13 @@ struct cfg80211_ap_settings {
* @beacon: beacon data
* @fils_discovery: FILS discovery transmission parameters
* @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
+ * @s1g_short_beacon: S1G short beacon data
*/
struct cfg80211_ap_update {
struct cfg80211_beacon_data beacon;
struct cfg80211_fils_discovery fils_discovery;
struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
+ struct cfg80211_s1g_short_beacon s1g_short_beacon;
};
/**
@@ -1526,6 +1549,7 @@ struct cfg80211_ap_update {
* @n_counter_offsets_beacon: number of csa counters the beacon (tail)
* @n_counter_offsets_presp: number of csa counters in the probe response
* @beacon_after: beacon data to be used on the new channel
+ * @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
* @radar_required: whether radar detection is required on the new channel
* @block_tx: whether transmissions should be blocked while changing
* @count: number of beacons until switch
@@ -1540,6 +1564,7 @@ struct cfg80211_csa_settings {
unsigned int n_counter_offsets_beacon;
unsigned int n_counter_offsets_presp;
struct cfg80211_beacon_data beacon_after;
+ struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
bool radar_required;
bool block_tx;
u8 count;
@@ -1555,6 +1580,7 @@ struct cfg80211_csa_settings {
* @counter_offset_beacon: offsets of the counters within the beacon (tail)
* @counter_offset_presp: offsets of the counters within the probe response
* @beacon_next: beacon data to be used after the color change
+ * @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
* @count: number of beacons until the color change
* @color: the color used after the change
* @link_id: defines the link on which color change is expected during MLO.
@@ -1565,6 +1591,7 @@ struct cfg80211_color_change_settings {
u16 counter_offset_beacon;
u16 counter_offset_presp;
struct cfg80211_beacon_data beacon_next;
+ struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
u8 count;
u8 color;
u8 link_id;
@@ -1653,6 +1680,7 @@ struct sta_txpwr {
* @he_6ghz_capa: HE 6 GHz Band capabilities of station
* @eht_capa: EHT capabilities of station
* @eht_capa_len: the length of the EHT capabilities
+ * @s1g_capa: S1G capabilities of station
*/
struct link_station_parameters {
const u8 *mld_mac;
@@ -1671,6 +1699,7 @@ struct link_station_parameters {
const struct ieee80211_he_6ghz_capa *he_6ghz_capa;
const struct ieee80211_eht_cap_elem *eht_capa;
u8 eht_capa_len;
+ const struct ieee80211_s1g_cap *s1g_capa;
};
/**
@@ -2018,6 +2047,99 @@ struct cfg80211_tid_stats {
#define IEEE80211_MAX_CHAINS 4
/**
+ * struct link_station_info - link station information
+ *
+ * Link station information filled by driver for get_station() and
+ * dump_station().
+ * @filled: bit flag of flags using the bits of &enum nl80211_sta_info to
+ * indicate the relevant values in this struct for them
+ * @connected_time: time(in secs) since a link of station is last connected
+ * @inactive_time: time since last activity for link station(tx/rx)
+ * in milliseconds
+ * @assoc_at: boottime (ns) of the last association of link of station
+ * @rx_bytes: bytes (size of MPDUs) received from this link of station
+ * @tx_bytes: bytes (size of MPDUs) transmitted to this link of station
+ * @signal: The signal strength, type depends on the wiphy's signal_type.
+ * For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_.
+ * @signal_avg: Average signal strength, type depends on the wiphy's
+ * signal_type. For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_
+ * @chains: bitmask for filled values in @chain_signal, @chain_signal_avg
+ * @chain_signal: per-chain signal strength of last received packet in dBm
+ * @chain_signal_avg: per-chain signal strength average in dBm
+ * @txrate: current unicast bitrate from this link of station
+ * @rxrate: current unicast bitrate to this link of station
+ * @rx_packets: packets (MSDUs & MMPDUs) received from this link of station
+ * @tx_packets: packets (MSDUs & MMPDUs) transmitted to this link of station
+ * @tx_retries: cumulative retry counts (MPDUs) for this link of station
+ * @tx_failed: number of failed transmissions (MPDUs) (retries exceeded, no ACK)
+ * @rx_dropped_misc: Dropped for un-specified reason.
+ * @bss_param: current BSS parameters
+ * @beacon_loss_count: Number of times beacon loss event has triggered.
+ * @expected_throughput: expected throughput in kbps (including 802.11 headers)
+ * towards this station.
+ * @rx_beacon: number of beacons received from this peer
+ * @rx_beacon_signal_avg: signal strength average (in dBm) for beacons received
+ * from this peer
+ * @rx_duration: aggregate PPDU duration(usecs) for all the frames from a peer
+ * @tx_duration: aggregate PPDU duration(usecs) for all the frames to a peer
+ * @airtime_weight: current airtime scheduling weight
+ * @pertid: per-TID statistics, see &struct cfg80211_tid_stats, using the last
+ * (IEEE80211_NUM_TIDS) index for MSDUs not encapsulated in QoS-MPDUs.
+ * Note that this doesn't use the @filled bit, but is used if non-NULL.
+ * @ack_signal: signal strength (in dBm) of the last ACK frame.
+ * @avg_ack_signal: average RSSI value of the ACK packets for the MSDUs that
+ *	have been sent.
+ * @rx_mpdu_count: number of MPDUs received from this station
+ * @fcs_err_count: number of packets (MPDUs) received from this station with
+ * an FCS error. This counter should be incremented only when TA of the
+ * received packet with an FCS error matches the peer MAC address.
+ * @addr: For MLO STA connection, filled with address of the link of station.
+ */
+struct link_station_info {
+ u64 filled;
+ u32 connected_time;
+ u32 inactive_time;
+ u64 assoc_at;
+ u64 rx_bytes;
+ u64 tx_bytes;
+ s8 signal;
+ s8 signal_avg;
+
+ u8 chains;
+ s8 chain_signal[IEEE80211_MAX_CHAINS];
+ s8 chain_signal_avg[IEEE80211_MAX_CHAINS];
+
+ struct rate_info txrate;
+ struct rate_info rxrate;
+ u32 rx_packets;
+ u32 tx_packets;
+ u32 tx_retries;
+ u32 tx_failed;
+ u32 rx_dropped_misc;
+ struct sta_bss_parameters bss_param;
+
+ u32 beacon_loss_count;
+
+ u32 expected_throughput;
+
+ u64 tx_duration;
+ u64 rx_duration;
+ u64 rx_beacon;
+ u8 rx_beacon_signal_avg;
+
+ u16 airtime_weight;
+
+ s8 ack_signal;
+ s8 avg_ack_signal;
+ struct cfg80211_tid_stats *pertid;
+
+ u32 rx_mpdu_count;
+ u32 fcs_err_count;
+
+ u8 addr[ETH_ALEN] __aligned(2);
+};
+
+/**
* struct station_info - station information
*
* Station information filled by driver for get_station() and dump_station.
@@ -2101,6 +2223,11 @@ struct cfg80211_tid_stats {
* dump_station() callbacks. User space needs this information to determine
* the accepted and rejected affiliated links of the connected station.
* @assoc_resp_ies_len: Length of @assoc_resp_ies buffer in octets.
+ * @valid_links: bitmap of valid links, or 0 for non-MLO. Drivers fill this
+ * information in cfg80211_new_sta(), cfg80211_del_sta_sinfo(),
+ * get_station() and dump_station() callbacks.
+ * @links: reference to Link sta entries for MLO STA, all link specific
+ * information is accessed through links[link_id].
*/
struct station_info {
u64 filled;
@@ -2165,6 +2292,9 @@ struct station_info {
u8 mld_addr[ETH_ALEN] __aligned(2);
const u8 *assoc_resp_ies;
size_t assoc_resp_ies_len;
+
+ u16 valid_links;
+ struct link_station_info *links[IEEE80211_MLD_MAX_NUM_LINKS];
};
/**
@@ -2645,15 +2775,16 @@ struct cfg80211_scan_6ghz_params {
* @wiphy: the wiphy this was for
* @scan_start: time (in jiffies) when the scan started
* @wdev: the wireless device to scan for
- * @info: (internal) information about completed scan
- * @notified: (internal) scan request was notified as done or aborted
* @no_cck: used to send probe requests at non CCK rate in 2GHz band
* @mac_addr: MAC address used with randomisation
* @mac_addr_mask: MAC address mask used with randomisation, bits that
* are 0 in the mask should be randomised, bits that are 1 should
* be taken from the @mac_addr
* @scan_6ghz: relevant for split scan request only,
- * true if this is the second scan request
+ * true if this is a 6 GHz scan request
+ * @first_part: %true if this is the first part of a split scan request or a
+ * scan that was not split. May be %true for a @scan_6ghz scan if no other
+ * channels were requested
* @n_6ghz_params: number of 6 GHz params
* @scan_6ghz_params: 6 GHz params
* @bssid: BSSID to scan for (most commonly, the wildcard BSSID)
@@ -2677,14 +2808,11 @@ struct cfg80211_scan_request {
u8 mac_addr[ETH_ALEN] __aligned(2);
u8 mac_addr_mask[ETH_ALEN] __aligned(2);
u8 bssid[ETH_ALEN] __aligned(2);
-
- /* internal */
struct wiphy *wiphy;
unsigned long scan_start;
- struct cfg80211_scan_info info;
- bool notified;
bool no_cck;
bool scan_6ghz;
+ bool first_part;
u32 n_6ghz_params;
struct cfg80211_scan_6ghz_params *scan_6ghz_params;
s8 tsf_report_link_id;
@@ -4752,12 +4880,14 @@ struct cfg80211_ops {
int (*set_mcast_rate)(struct wiphy *wiphy, struct net_device *dev,
int rate[NUM_NL80211_BANDS]);
- int (*set_wiphy_params)(struct wiphy *wiphy, u32 changed);
+ int (*set_wiphy_params)(struct wiphy *wiphy, int radio_idx,
+ u32 changed);
int (*set_tx_power)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ int radio_idx,
enum nl80211_tx_power_setting type, int mbm);
int (*get_tx_power)(struct wiphy *wiphy, struct wireless_dev *wdev,
- unsigned int link_id, int *dbm);
+ int radio_idx, unsigned int link_id, int *dbm);
void (*rfkill_poll)(struct wiphy *wiphy);
@@ -4819,8 +4949,10 @@ struct cfg80211_ops {
struct wireless_dev *wdev,
struct mgmt_frame_regs *upd);
- int (*set_antenna)(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant);
- int (*get_antenna)(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant);
+ int (*set_antenna)(struct wiphy *wiphy, int radio_idx,
+ u32 tx_ant, u32 rx_ant);
+ int (*get_antenna)(struct wiphy *wiphy, int radio_idx,
+ u32 *tx_ant, u32 *rx_ant);
int (*sched_scan_start)(struct wiphy *wiphy,
struct net_device *dev,
@@ -5443,6 +5575,18 @@ struct wiphy_iftype_akm_suites {
};
/**
+ * struct wiphy_radio_cfg - physical radio config of a wiphy
+ * This structure describes the configurations of a physical radio in a
+ * wiphy. It is used to denote per-radio attributes belonging to a wiphy.
+ *
+ * @rts_threshold: RTS threshold (dot11RTSThreshold);
+ * -1 (default) = RTS/CTS disabled
+ */
+struct wiphy_radio_cfg {
+ u32 rts_threshold;
+};
+
+/**
* struct wiphy_radio_freq_range - wiphy frequency range
* @start_freq: start range edge frequency (kHz)
* @end_freq: end range edge frequency (kHz)
@@ -5697,6 +5841,10 @@ struct wiphy_radio {
* supports enabling HW timestamping for all peers (i.e. no need to
* specify a mac address).
*
+ * @radio_cfg: configuration of radios belonging to a multi-radio wiphy. This
+ * struct contains a list of all radio specific attributes and should be
+ * used only for multi-radio wiphy.
+ *
* @radio: radios belonging to this wiphy
* @n_radio: number of radios
*/
@@ -5786,6 +5934,8 @@ struct wiphy {
void (*reg_notifier)(struct wiphy *wiphy,
struct regulatory_request *request);
+ struct wiphy_radio_cfg *radio_cfg;
+
/* fields below are read-only, assigned by cfg80211 */
const struct ieee80211_regdomain __rcu *regd;
@@ -8466,6 +8616,17 @@ void cfg80211_tx_mgmt_expired(struct wireless_dev *wdev, u64 cookie,
int cfg80211_sinfo_alloc_tid_stats(struct station_info *sinfo, gfp_t gfp);
/**
+ * cfg80211_link_sinfo_alloc_tid_stats - allocate per-tid statistics.
+ *
+ * @link_sinfo: the link station information
+ * @gfp: allocation flags
+ *
+ * Return: 0 on success. Non-zero on error.
+ */
+int cfg80211_link_sinfo_alloc_tid_stats(struct link_station_info *link_sinfo,
+ gfp_t gfp);
+
+/**
* cfg80211_sinfo_release_content - release contents of station info
* @sinfo: the station information
*
@@ -8476,6 +8637,13 @@ int cfg80211_sinfo_alloc_tid_stats(struct station_info *sinfo, gfp_t gfp);
static inline void cfg80211_sinfo_release_content(struct station_info *sinfo)
{
kfree(sinfo->pertid);
+
+ for (int link_id = 0; link_id < ARRAY_SIZE(sinfo->links); link_id++) {
+ if (sinfo->links[link_id]) {
+ kfree(sinfo->links[link_id]->pertid);
+ kfree(sinfo->links[link_id]);
+ }
+ }
}
/**
@@ -8880,6 +9048,7 @@ void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
/**
* cfg80211_rx_spurious_frame - inform userspace about a spurious frame
* @dev: The device the frame matched to
+ * @link_id: the link the frame was received on, -1 if not applicable or unknown
* @addr: the transmitter address
* @gfp: context flags
*
@@ -8889,13 +9058,14 @@ void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
* Return: %true if the frame was passed to userspace (or this failed
* for a reason other than not having a subscription.)
*/
-bool cfg80211_rx_spurious_frame(struct net_device *dev,
- const u8 *addr, gfp_t gfp);
+bool cfg80211_rx_spurious_frame(struct net_device *dev, const u8 *addr,
+ int link_id, gfp_t gfp);
/**
* cfg80211_rx_unexpected_4addr_frame - inform about unexpected WDS frame
* @dev: The device the frame matched to
* @addr: the transmitter address
+ * @link_id: the link the frame was received on, -1 if not applicable or unknown
* @gfp: context flags
*
* This function is used in AP mode (only!) to inform userspace that
@@ -8905,8 +9075,8 @@ bool cfg80211_rx_spurious_frame(struct net_device *dev,
* Return: %true if the frame was passed to userspace (or this failed
* for a reason other than not having a subscription.)
*/
-bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev,
- const u8 *addr, gfp_t gfp);
+bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev, const u8 *addr,
+ int link_id, gfp_t gfp);
/**
* cfg80211_probe_status - notify userspace about probe status
@@ -9372,6 +9542,17 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
void (*iter)(const struct ieee80211_iface_combination *c,
void *data),
void *data);
+/**
+ * cfg80211_get_radio_idx_by_chan - get the radio index by the channel
+ *
+ * @wiphy: the wiphy
+ * @chan: channel for which the supported radio index is required
+ *
+ * Return: radio index on success or a negative error code
+ */
+int cfg80211_get_radio_idx_by_chan(struct wiphy *wiphy,
+ const struct ieee80211_channel *chan);
+
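A minimal sketch of how a driver might combine the new helper with the per-radio @radio_cfg array added above; the wrapper function is hypothetical and assumes the wiphy registered per-radio configurations.

static u32 example_rts_threshold_for_chan(struct wiphy *wiphy,
					  const struct ieee80211_channel *chan)
{
	int idx = cfg80211_get_radio_idx_by_chan(wiphy, chan);

	/* a negative return means no radio of this wiphy covers the channel */
	if (idx < 0)
		return (u32)-1;	/* -1 = RTS/CTS disabled */

	return wiphy->radio_cfg[idx].rts_threshold;
}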
/**
* cfg80211_stop_iface - trigger interface disconnection
@@ -9736,6 +9917,11 @@ void cfg80211_links_removed(struct net_device *dev, u16 link_mask);
* struct cfg80211_mlo_reconf_done_data - MLO reconfiguration data
* @buf: MLO Reconfiguration Response frame (header + body)
* @len: length of the frame data
+ * @driver_initiated: indicates whether the add-links request was initiated by
+ * the driver. It is set to true when the driver initiates the link
+ * reconfiguration request because handling of AP link recommendation
+ * requests (e.g. BTM (BSS Transition Management) requests) is offloaded
+ * to the driver.
* @added_links: BIT mask of links successfully added to the association
* @links: per-link information indexed by link ID
* @links.bss: the BSS that MLO reconfiguration was requested for, ownership of
@@ -9748,6 +9934,7 @@ void cfg80211_links_removed(struct net_device *dev, u16 link_mask);
struct cfg80211_mlo_reconf_done_data {
const u8 *buf;
size_t len;
+ bool driver_initiated;
u16 added_links;
struct {
struct cfg80211_bss *bss;
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 0091f23a40f7..93640a29427c 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -118,6 +118,8 @@ struct devlink_rate {
u32 tx_priority;
u32 tx_weight;
+
+ u32 tc_bw[DEVLINK_RATE_TCS_MAX];
};
struct devlink_port {
@@ -423,6 +425,7 @@ enum devlink_param_type {
DEVLINK_PARAM_TYPE_U8 = DEVLINK_VAR_ATTR_TYPE_U8,
DEVLINK_PARAM_TYPE_U16 = DEVLINK_VAR_ATTR_TYPE_U16,
DEVLINK_PARAM_TYPE_U32 = DEVLINK_VAR_ATTR_TYPE_U32,
+ DEVLINK_PARAM_TYPE_U64 = DEVLINK_VAR_ATTR_TYPE_U64,
DEVLINK_PARAM_TYPE_STRING = DEVLINK_VAR_ATTR_TYPE_STRING,
DEVLINK_PARAM_TYPE_BOOL = DEVLINK_VAR_ATTR_TYPE_FLAG,
};
@@ -431,6 +434,7 @@ union devlink_param_value {
u8 vu8;
u16 vu16;
u32 vu32;
+ u64 vu64;
char vstr[__DEVLINK_PARAM_MAX_STRING_VALUE];
bool vbool;
};
@@ -520,6 +524,8 @@ enum devlink_param_generic_id {
DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP,
DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
+ DEVLINK_PARAM_GENERIC_ID_CLOCK_ID,
/* add new param generic ids above here*/
__DEVLINK_PARAM_GENERIC_ID_MAX,
@@ -578,6 +584,12 @@ enum devlink_param_generic_id {
#define DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_NAME "event_eq_size"
#define DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_TYPE DEVLINK_PARAM_TYPE_U32
+#define DEVLINK_PARAM_GENERIC_ENABLE_PHC_NAME "enable_phc"
+#define DEVLINK_PARAM_GENERIC_ENABLE_PHC_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_CLOCK_ID_NAME "clock_id"
+#define DEVLINK_PARAM_GENERIC_CLOCK_ID_TYPE DEVLINK_PARAM_TYPE_U64
+
#define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \
{ \
.id = DEVLINK_PARAM_GENERIC_ID_##_id, \
@@ -1482,6 +1494,9 @@ struct devlink_ops {
u32 tx_priority, struct netlink_ext_ack *extack);
int (*rate_leaf_tx_weight_set)(struct devlink_rate *devlink_rate, void *priv,
u32 tx_weight, struct netlink_ext_ack *extack);
+ int (*rate_leaf_tc_bw_set)(struct devlink_rate *devlink_rate,
+ void *priv, u32 *tc_bw,
+ struct netlink_ext_ack *extack);
int (*rate_node_tx_share_set)(struct devlink_rate *devlink_rate, void *priv,
u64 tx_share, struct netlink_ext_ack *extack);
int (*rate_node_tx_max_set)(struct devlink_rate *devlink_rate, void *priv,
@@ -1490,6 +1505,9 @@ struct devlink_ops {
u32 tx_priority, struct netlink_ext_ack *extack);
int (*rate_node_tx_weight_set)(struct devlink_rate *devlink_rate, void *priv,
u32 tx_weight, struct netlink_ext_ack *extack);
+ int (*rate_node_tc_bw_set)(struct devlink_rate *devlink_rate,
+ void *priv, u32 *tc_bw,
+ struct netlink_ext_ack *extack);
int (*rate_node_new)(struct devlink_rate *rate_node, void **priv,
struct netlink_ext_ack *extack);
int (*rate_node_del)(struct devlink_rate *rate_node, void *priv,
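As an illustration only, a driver callback matching the new rate_leaf_tc_bw_set signature could look like the sketch below; the body is intentionally empty, since the per-TC programming is device-specific, and the function name is made up.

static int example_rate_leaf_tc_bw_set(struct devlink_rate *devlink_rate,
				       void *priv, u32 *tc_bw,
				       struct netlink_ext_ack *extack)
{
	/* tc_bw points to DEVLINK_RATE_TCS_MAX per-traffic-class entries;
	 * a real driver would validate and program them into HW here */
	return 0;
}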
diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h
index bcf9d7467e1a..d8ff24a33459 100644
--- a/include/net/dropreason-core.h
+++ b/include/net/dropreason-core.h
@@ -45,6 +45,7 @@
FN(TCP_LISTEN_OVERFLOW) \
FN(TCP_OLD_SEQUENCE) \
FN(TCP_INVALID_SEQUENCE) \
+ FN(TCP_INVALID_END_SEQUENCE) \
FN(TCP_INVALID_ACK_SEQUENCE) \
FN(TCP_RESET) \
FN(TCP_INVALID_SYN) \
@@ -121,6 +122,11 @@
FN(ARP_PVLAN_DISABLE) \
FN(MAC_IEEE_MAC_CONTROL) \
FN(BRIDGE_INGRESS_STP_STATE) \
+ FN(CAN_RX_INVALID_FRAME) \
+ FN(CANFD_RX_INVALID_FRAME) \
+ FN(CANXL_RX_INVALID_FRAME) \
+ FN(PFMEMALLOC) \
+ FN(DUALPI2_STEP_DROP) \
FNe(MAX)
/**
@@ -300,9 +306,15 @@ enum skb_drop_reason {
SKB_DROP_REASON_TCP_LISTEN_OVERFLOW,
/** @SKB_DROP_REASON_TCP_OLD_SEQUENCE: Old SEQ field (duplicate packet) */
SKB_DROP_REASON_TCP_OLD_SEQUENCE,
- /** @SKB_DROP_REASON_TCP_INVALID_SEQUENCE: Not acceptable SEQ field */
+ /** @SKB_DROP_REASON_TCP_INVALID_SEQUENCE: Not acceptable SEQ field. */
SKB_DROP_REASON_TCP_INVALID_SEQUENCE,
/**
+ * @SKB_DROP_REASON_TCP_INVALID_END_SEQUENCE:
+ * Not acceptable END_SEQ field.
+ * Corresponds to LINUX_MIB_BEYOND_WINDOW.
+ */
+ SKB_DROP_REASON_TCP_INVALID_END_SEQUENCE,
+ /**
* @SKB_DROP_REASON_TCP_INVALID_ACK_SEQUENCE: Not acceptable ACK SEQ
* field because ack sequence is not in the window between snd_una
* and snd_nxt
@@ -574,6 +586,31 @@ enum skb_drop_reason {
*/
SKB_DROP_REASON_BRIDGE_INGRESS_STP_STATE,
/**
+ * @SKB_DROP_REASON_CAN_RX_INVALID_FRAME: received a
+ * non-conformant CAN frame (or the device is unable to receive CAN frames)
+ */
+ SKB_DROP_REASON_CAN_RX_INVALID_FRAME,
+ /**
+ * @SKB_DROP_REASON_CANFD_RX_INVALID_FRAME: received a
+ * non-conformant CAN-FD frame (or the device is unable to receive CAN frames)
+ */
+ SKB_DROP_REASON_CANFD_RX_INVALID_FRAME,
+ /**
+ * @SKB_DROP_REASON_CANXL_RX_INVALID_FRAME: received a
+ * non-conformant CAN-XL frame (or the device is unable to receive CAN frames)
+ */
+ SKB_DROP_REASON_CANXL_RX_INVALID_FRAME,
+ /**
+ * @SKB_DROP_REASON_PFMEMALLOC: a packet allocated from the memory reserves
+ * reached a path or socket that is not eligible to use memory reserves
+ */
+ SKB_DROP_REASON_PFMEMALLOC,
+ /**
+ * @SKB_DROP_REASON_DUALPI2_STEP_DROP: dropped by the step drop
+ * threshold of DualPI2 qdisc.
+ */
+ SKB_DROP_REASON_DUALPI2_STEP_DROP,
+ /**
* @SKB_DROP_REASON_MAX: the maximum of core drop reasons, which
* shouldn't be used as a real 'reason' - only for tracing code gen
*/
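For context, a hedged sketch of how a CAN driver's Rx path might report the new reason (the helper name is made up); kfree_skb_reason() feeds dropwatch/tracing with the precise cause instead of a bare kfree_skb().

static void example_can_rx_drop(struct sk_buff *skb)
{
	/* count the drop under the new, CAN-specific reason */
	kfree_skb_reason(skb, SKB_DROP_REASON_CAN_RX_INVALID_FRAME);
}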
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 55e2d97f247e..d73ea0880066 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -54,11 +54,13 @@ struct tc_action;
#define DSA_TAG_PROTO_RZN1_A5PSW_VALUE 26
#define DSA_TAG_PROTO_LAN937X_VALUE 27
#define DSA_TAG_PROTO_VSC73XX_8021Q_VALUE 28
+#define DSA_TAG_PROTO_BRCM_LEGACY_FCS_VALUE 29
enum dsa_tag_protocol {
DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE,
DSA_TAG_PROTO_BRCM = DSA_TAG_PROTO_BRCM_VALUE,
DSA_TAG_PROTO_BRCM_LEGACY = DSA_TAG_PROTO_BRCM_LEGACY_VALUE,
+ DSA_TAG_PROTO_BRCM_LEGACY_FCS = DSA_TAG_PROTO_BRCM_LEGACY_FCS_VALUE,
DSA_TAG_PROTO_BRCM_PREPEND = DSA_TAG_PROTO_BRCM_PREPEND_VALUE,
DSA_TAG_PROTO_DSA = DSA_TAG_PROTO_DSA_VALUE,
DSA_TAG_PROTO_EDSA = DSA_TAG_PROTO_EDSA_VALUE,
diff --git a/include/net/dst.h b/include/net/dst.h
index 78c78cdce0e9..00467c1b5093 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -240,9 +240,9 @@ static inline void dst_hold(struct dst_entry *dst)
static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
{
- if (unlikely(time != dst->lastuse)) {
+ if (unlikely(time != READ_ONCE(dst->lastuse))) {
dst->__use++;
- dst->lastuse = time;
+ WRITE_ONCE(dst->lastuse, time);
}
}
@@ -431,13 +431,15 @@ static inline void dst_link_failure(struct sk_buff *skb)
static inline void dst_set_expires(struct dst_entry *dst, int timeout)
{
- unsigned long expires = jiffies + timeout;
+ unsigned long old, expires = jiffies + timeout;
if (expires == 0)
expires = 1;
- if (dst->expires == 0 || time_before(expires, dst->expires))
- dst->expires = expires;
+ old = READ_ONCE(dst->expires);
+
+ if (!old || time_before(expires, old))
+ WRITE_ONCE(dst->expires, expires);
}
static inline unsigned int dst_dev_overhead(struct dst_entry *dst,
@@ -456,7 +458,7 @@ INDIRECT_CALLABLE_DECLARE(int ip_output(struct net *, struct sock *,
/* Output packet to network from transport. */
static inline int dst_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
- return INDIRECT_CALL_INET(skb_dst(skb)->output,
+ return INDIRECT_CALL_INET(READ_ONCE(skb_dst(skb)->output),
ip6_output, ip_output,
net, sk, skb);
}
@@ -466,7 +468,7 @@ INDIRECT_CALLABLE_DECLARE(int ip_local_deliver(struct sk_buff *));
/* Input packet from network to transport. */
static inline int dst_input(struct sk_buff *skb)
{
- return INDIRECT_CALL_INET(skb_dst(skb)->input,
+ return INDIRECT_CALL_INET(READ_ONCE(skb_dst(skb)->input),
ip6_input, ip_local_deliver, skb);
}
@@ -476,7 +478,7 @@ INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
u32));
static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
- if (dst->obsolete)
+ if (READ_ONCE(dst->obsolete))
dst = INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check,
ipv4_dst_check, dst, cookie);
return dst;
@@ -561,6 +563,26 @@ static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
}
+static inline struct net_device *dst_dev(const struct dst_entry *dst)
+{
+ return READ_ONCE(dst->dev);
+}
+
+static inline struct net_device *skb_dst_dev(const struct sk_buff *skb)
+{
+ return dst_dev(skb_dst(skb));
+}
+
+static inline struct net *skb_dst_dev_net(const struct sk_buff *skb)
+{
+ return dev_net(skb_dst_dev(skb));
+}
+
+static inline struct net *skb_dst_dev_net_rcu(const struct sk_buff *skb)
+{
+ return dev_net_rcu(skb_dst_dev(skb));
+}
+
struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie);
void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu, bool confirm_neigh);
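A small sketch, using a made-up helper name, of how the new accessors replace open-coded dst->dev dereferences (the same pattern the ip.h and inet hashtable hunks below follow): dst_dev() wraps READ_ONCE(dst->dev), and skb_dst_dev_net_rcu() stands in for dev_net_rcu(skb_dst(skb)->dev).

static unsigned int example_path_dev_mtu(const struct sk_buff *skb)
{
	const struct dst_entry *dst = skb_dst(skb);

	/* dst_dev() hides the READ_ONCE() so lockless readers always see
	 * a consistent device pointer */
	return READ_ONCE(dst_dev(dst)->mtu);
}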
diff --git a/include/net/gro.h b/include/net/gro.h
index 22d3a69e4404..a0fca7ac6e7e 100644
--- a/include/net/gro.h
+++ b/include/net/gro.h
@@ -534,6 +534,12 @@ static inline void gro_normal_list(struct gro_node *gro)
gro->rx_count = 0;
}
+static inline void gro_flush_normal(struct gro_node *gro, bool flush_old)
+{
+ gro_flush(gro, flush_old);
+ gro_normal_list(gro);
+}
+
/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
* pass the whole batch up to the stack.
*/
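A minimal sketch of the intended use, with a hypothetical driver function name: callers that previously issued gro_flush() followed by gro_normal_list() at the end of a NAPI poll can now use the combined helper.

static void example_napi_poll_finish(struct gro_node *gro)
{
	/* flush all held GRO packets (flush_old == false) and pass the
	 * gro_normal list up the stack in one go */
	gro_flush_normal(gro, false);
}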
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index c32878c69179..ab3929a2a956 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -150,7 +150,7 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
int iif, int sdif,
bool *refcounted)
{
- struct net *net = dev_net_rcu(skb_dst(skb)->dev);
+ struct net *net = skb_dst_dev_net_rcu(skb);
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
struct sock *sk;
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 4564b5d348b1..19dbd9081d5a 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -202,12 +202,6 @@ static inline spinlock_t *inet_ehash_lockp(
int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo);
-static inline void inet_hashinfo2_free_mod(struct inet_hashinfo *h)
-{
- kfree(h->lhash2);
- h->lhash2 = NULL;
-}
-
static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
{
kvfree(hashinfo->ehash_locks);
@@ -487,7 +481,7 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
const int sdif,
bool *refcounted)
{
- struct net *net = dev_net_rcu(skb_dst(skb)->dev);
+ struct net *net = skb_dst_dev_net_rcu(skb);
const struct iphdr *iph = ip_hdr(skb);
struct sock *sk;
diff --git a/include/net/ip.h b/include/net/ip.h
index 47ed6d23853d..befcba575129 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -59,6 +59,7 @@ struct inet_skb_parm {
#define IPSKB_L3SLAVE BIT(7)
#define IPSKB_NOPOLICY BIT(8)
#define IPSKB_MULTIPATH BIT(9)
+#define IPSKB_MCROUTE BIT(10)
u16 frag_max_size;
};
@@ -167,6 +168,7 @@ void ip_list_rcv(struct list_head *head, struct packet_type *pt,
int ip_local_deliver(struct sk_buff *skb);
void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int proto);
int ip_mr_input(struct sk_buff *skb);
+int ip_mr_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
@@ -470,12 +472,12 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
rcu_read_lock();
- net = dev_net_rcu(dst->dev);
+ net = dev_net_rcu(dst_dev(dst));
if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) ||
ip_mtu_locked(dst) ||
!forwarding) {
mtu = rt->rt_pmtu;
- if (mtu && time_before(jiffies, rt->dst.expires))
+ if (mtu && time_before(jiffies, READ_ONCE(rt->dst.expires)))
goto out;
}
@@ -484,7 +486,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
if (mtu)
goto out;
- mtu = READ_ONCE(dst->dev->mtu);
+ mtu = READ_ONCE(dst_dev(dst)->mtu);
if (unlikely(ip_mtu_locked(dst))) {
if (rt->rt_uses_gateway && mtu > 576)
@@ -504,16 +506,17 @@ out:
static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
const struct sk_buff *skb)
{
+ const struct dst_entry *dst = skb_dst(skb);
unsigned int mtu;
if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
- return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
+ return ip_dst_mtu_maybe_forward(dst, forwarding);
}
- mtu = min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
- return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
+ mtu = min(READ_ONCE(dst_dev(dst)->mtu), IP_MAX_MTU);
+ return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}
struct dst_metrics *ip_fib_metrics_init(struct nlattr *fc_mx, int fc_mx_len,
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 6dbdf60b342f..9255f21818ee 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -274,7 +274,7 @@ static inline unsigned int ip6_skb_dst_mtu(const struct sk_buff *skb)
unsigned int mtu;
if (np && READ_ONCE(np->pmtudisc) >= IPV6_PMTUDISC_PROBE) {
- mtu = READ_ONCE(dst->dev->mtu);
+ mtu = READ_ONCE(dst_dev(dst)->mtu);
mtu -= lwtunnel_headroom(dst->lwtstate, mtu);
} else {
mtu = dst_mtu(dst);
@@ -337,7 +337,7 @@ static inline unsigned int ip6_dst_mtu_maybe_forward(const struct dst_entry *dst
mtu = IPV6_MIN_MTU;
rcu_read_lock();
- idev = __in6_dev_get(dst->dev);
+ idev = __in6_dev_get(dst_dev(dst));
if (idev)
mtu = READ_ONCE(idev->cnf.mtu6);
rcu_read_unlock();
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 399592405c72..120db2865811 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -152,13 +152,14 @@ int ip6_tnl_get_iflink(const struct net_device *dev);
int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu);
static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
- struct net_device *dev)
+ struct net_device *dev, u16 ip6cb_flags)
{
int pkt_len, err;
memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
+ IP6CB(skb)->flags = ip6cb_flags;
pkt_len = skb->len - skb_inner_network_offset(skb);
- err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb);
+ err = ip6_local_out(skb_dst_dev_net(skb), sk, skb);
if (dev) {
if (unlikely(net_xmit_eval(err)))
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 0c3d571a04a1..8cf1380f3656 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -603,7 +603,7 @@ static inline int iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
__be32 src, __be32 dst, u8 proto,
- u8 tos, u8 ttl, __be16 df, bool xnet);
+ u8 tos, u8 ttl, __be16 df, bool xnet, u16 ipcb_flags);
struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
gfp_t flags);
int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst,
diff --git a/include/net/libeth/rx.h b/include/net/libeth/rx.h
index ab05024be518..5d991404845e 100644
--- a/include/net/libeth/rx.h
+++ b/include/net/libeth/rx.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (C) 2024 Intel Corporation */
+/* Copyright (C) 2024-2025 Intel Corporation */
#ifndef __LIBETH_RX_H
#define __LIBETH_RX_H
@@ -13,8 +13,10 @@
/* Space reserved in front of each frame */
#define LIBETH_SKB_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
+#define LIBETH_XDP_HEADROOM (ALIGN(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \
+ NET_IP_ALIGN)
/* Maximum headroom for worst-case calculations */
-#define LIBETH_MAX_HEADROOM LIBETH_SKB_HEADROOM
+#define LIBETH_MAX_HEADROOM LIBETH_XDP_HEADROOM
/* Link layer / L2 overhead: Ethernet, 2 VLAN tags (C + S), FCS */
#define LIBETH_RX_LL_LEN (ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN)
/* Maximum supported L2-L4 header length */
@@ -31,7 +33,7 @@
/**
* struct libeth_fqe - structure representing an Rx buffer (fill queue element)
- * @page: page holding the buffer
+ * @netmem: network memory reference holding the buffer
* @offset: offset from the page start (to the headroom)
* @truesize: total space occupied by the buffer (w/ headroom and tailroom)
*
@@ -40,7 +42,7 @@
* former, @offset is always 0 and @truesize is always ```PAGE_SIZE```.
*/
struct libeth_fqe {
- struct page *page;
+ netmem_ref netmem;
u32 offset;
u32 truesize;
} __aligned_largest;
@@ -66,6 +68,7 @@ enum libeth_fqe_type {
* @count: number of descriptors/buffers the queue has
* @type: type of the buffers this queue has
* @hsplit: flag whether header split is enabled
+ * @xdp: flag indicating whether XDP is enabled
* @buf_len: HW-writeable length per each buffer
* @nid: ID of the closest NUMA node with memory
*/
@@ -81,6 +84,7 @@ struct libeth_fq {
/* Cold fields */
enum libeth_fqe_type type:2;
bool hsplit:1;
+ bool xdp:1;
u32 buf_len;
int nid;
@@ -102,15 +106,16 @@ static inline dma_addr_t libeth_rx_alloc(const struct libeth_fq_fp *fq, u32 i)
struct libeth_fqe *buf = &fq->fqes[i];
buf->truesize = fq->truesize;
- buf->page = page_pool_dev_alloc(fq->pp, &buf->offset, &buf->truesize);
- if (unlikely(!buf->page))
+ buf->netmem = page_pool_dev_alloc_netmem(fq->pp, &buf->offset,
+ &buf->truesize);
+ if (unlikely(!buf->netmem))
return DMA_MAPPING_ERROR;
- return page_pool_get_dma_addr(buf->page) + buf->offset +
+ return page_pool_get_dma_addr_netmem(buf->netmem) + buf->offset +
fq->pp->p.offset;
}
-void libeth_rx_recycle_slow(struct page *page);
+void libeth_rx_recycle_slow(netmem_ref netmem);
/**
* libeth_rx_sync_for_cpu - synchronize or recycle buffer post DMA
@@ -126,18 +131,19 @@ void libeth_rx_recycle_slow(struct page *page);
static inline bool libeth_rx_sync_for_cpu(const struct libeth_fqe *fqe,
u32 len)
{
- struct page *page = fqe->page;
+ netmem_ref netmem = fqe->netmem;
/* Very rare, but possible case. The most common reason:
* the last fragment contained FCS only, which was then
* stripped by the HW.
*/
if (unlikely(!len)) {
- libeth_rx_recycle_slow(page);
+ libeth_rx_recycle_slow(netmem);
return false;
}
- page_pool_dma_sync_for_cpu(page->pp, page, fqe->offset, len);
+ page_pool_dma_sync_netmem_for_cpu(netmem_get_pp(netmem), netmem,
+ fqe->offset, len);
return true;
}
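A rough refill sketch on top of the netmem-based allocator above; the wrapper and the descriptor write are hypothetical, only libeth_rx_alloc() and its DMA_MAPPING_ERROR return value are taken from the header.

static int example_rx_refill(const struct libeth_fq_fp *fq, u32 count)
{
	for (u32 i = 0; i < count; i++) {
		dma_addr_t dma = libeth_rx_alloc(fq, i);

		if (dma == DMA_MAPPING_ERROR)
			return -ENOMEM;

		/* a real driver writes 'dma' into its HW Rx descriptor at
		 * index 'i'; the backing buffer is now fq->fqes[i].netmem */
	}

	return 0;
}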
diff --git a/include/net/libeth/tx.h b/include/net/libeth/tx.h
index 35614f9523f6..c3db5c6f1641 100644
--- a/include/net/libeth/tx.h
+++ b/include/net/libeth/tx.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (C) 2024 Intel Corporation */
+/* Copyright (C) 2024-2025 Intel Corporation */
#ifndef __LIBETH_TX_H
#define __LIBETH_TX_H
@@ -12,11 +12,17 @@
/**
* enum libeth_sqe_type - type of &libeth_sqe to act on Tx completion
- * @LIBETH_SQE_EMPTY: unused/empty, no action required
+ * @LIBETH_SQE_EMPTY: unused/empty OR XDP_TX/XSk frame, no action required
* @LIBETH_SQE_CTX: context descriptor with empty SQE, no action required
* @LIBETH_SQE_SLAB: kmalloc-allocated buffer, unmap and kfree()
* @LIBETH_SQE_FRAG: mapped skb frag, only unmap DMA
* @LIBETH_SQE_SKB: &sk_buff, unmap and napi_consume_skb(), update stats
+ * @__LIBETH_SQE_XDP_START: separator between skb and XDP types
+ * @LIBETH_SQE_XDP_TX: &skb_shared_info, libeth_xdp_return_buff_bulk(), stats
+ * @LIBETH_SQE_XDP_XMIT: &xdp_frame, unmap and xdp_return_frame_bulk(), stats
+ * @LIBETH_SQE_XDP_XMIT_FRAG: &xdp_frame frag, only unmap DMA
+ * @LIBETH_SQE_XSK_TX: &libeth_xdp_buff on XSk queue, xsk_buff_free(), stats
+ * @LIBETH_SQE_XSK_TX_FRAG: &libeth_xdp_buff frag on XSk queue, xsk_buff_free()
*/
enum libeth_sqe_type {
LIBETH_SQE_EMPTY = 0U,
@@ -24,6 +30,13 @@ enum libeth_sqe_type {
LIBETH_SQE_SLAB,
LIBETH_SQE_FRAG,
LIBETH_SQE_SKB,
+
+ __LIBETH_SQE_XDP_START,
+ LIBETH_SQE_XDP_TX = __LIBETH_SQE_XDP_START,
+ LIBETH_SQE_XDP_XMIT,
+ LIBETH_SQE_XDP_XMIT_FRAG,
+ LIBETH_SQE_XSK_TX,
+ LIBETH_SQE_XSK_TX_FRAG,
};
/**
@@ -32,6 +45,9 @@ enum libeth_sqe_type {
* @rs_idx: index of the last buffer from the batch this one was sent in
* @raw: slab buffer to free via kfree()
* @skb: &sk_buff to consume
+ * @sinfo: skb shared info of an XDP_TX frame
+ * @xdpf: XDP frame from ::ndo_xdp_xmit()
+ * @xsk: XSk Rx frame from XDP_TX action
* @dma: DMA address to unmap
* @len: length of the mapped region to unmap
* @nr_frags: number of frags in the frame this buffer belongs to
@@ -46,6 +62,9 @@ struct libeth_sqe {
union {
void *raw;
struct sk_buff *skb;
+ struct skb_shared_info *sinfo;
+ struct xdp_frame *xdpf;
+ struct libeth_xdp_buff *xsk;
};
DEFINE_DMA_UNMAP_ADDR(dma);
@@ -71,7 +90,10 @@ struct libeth_sqe {
/**
* struct libeth_cq_pp - completion queue poll params
* @dev: &device to perform DMA unmapping
+ * @bq: XDP frame bulk to combine return operations
* @ss: onstack NAPI stats to fill
+ * @xss: onstack XDPSQ NAPI stats to fill
+ * @xdp_tx: number of XDP-not-XSk frames processed
* @napi: whether it's called from the NAPI context
*
* libeth uses this structure to access objects needed for performing full
@@ -80,7 +102,13 @@ struct libeth_sqe {
*/
struct libeth_cq_pp {
struct device *dev;
- struct libeth_sq_napi_stats *ss;
+ struct xdp_frame_bulk *bq;
+
+ union {
+ struct libeth_sq_napi_stats *ss;
+ struct libeth_xdpsq_napi_stats *xss;
+ };
+ u32 xdp_tx;
bool napi;
};
@@ -126,4 +154,6 @@ static inline void libeth_tx_complete(struct libeth_sqe *sqe,
sqe->type = LIBETH_SQE_EMPTY;
}
+void libeth_tx_complete_any(struct libeth_sqe *sqe, struct libeth_cq_pp *cp);
+
#endif /* __LIBETH_TX_H */
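A hedged sketch of an XDP-aware completion pass using the extended &libeth_cq_pp; the function and its parameters are hypothetical, ring-wrap handling is elided, and the xdp_frame_bulk helpers come from <net/xdp.h>.

static void example_clean_xdpsq(struct device *dma_dev,
				struct libeth_sqe *sqes, u32 done,
				struct libeth_xdpsq_napi_stats *xss)
{
	struct xdp_frame_bulk bq;
	struct libeth_cq_pp cp = {
		.dev	= dma_dev,
		.bq	= &bq,
		.xss	= xss,
		.napi	= true,
	};

	xdp_frame_bulk_init(&bq);

	for (u32 i = 0; i < done; i++)
		libeth_tx_complete_any(&sqes[i], &cp);

	/* return any frames batched by LIBETH_SQE_XDP_XMIT completions */
	xdp_flush_frame_bulk(&bq);
}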
diff --git a/include/net/libeth/types.h b/include/net/libeth/types.h
index 603825e45133..cf1d78a9dc38 100644
--- a/include/net/libeth/types.h
+++ b/include/net/libeth/types.h
@@ -1,10 +1,32 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (C) 2024 Intel Corporation */
+/* Copyright (C) 2024-2025 Intel Corporation */
#ifndef __LIBETH_TYPES_H
#define __LIBETH_TYPES_H
-#include <linux/types.h>
+#include <linux/workqueue.h>
+
+/* Stats */
+
+/**
+ * struct libeth_rq_napi_stats - "hot" counters to update in Rx polling loop
+ * @packets: received frames counter
+ * @bytes: sum of bytes of received frames above
+ * @fragments: sum of fragments of received S/G frames
+ * @hsplit: number of frames the device performed the header split for
+ * @raw: alias to access all the fields as an array
+ */
+struct libeth_rq_napi_stats {
+ union {
+ struct {
+ u32 packets;
+ u32 bytes;
+ u32 fragments;
+ u32 hsplit;
+ };
+ DECLARE_FLEX_ARRAY(u32, raw);
+ };
+};
/**
* struct libeth_sq_napi_stats - "hot" counters to update in Tx completion loop
@@ -22,4 +44,84 @@ struct libeth_sq_napi_stats {
};
};
+/**
+ * struct libeth_xdpsq_napi_stats - "hot" counters to update in XDP Tx
+ * completion loop
+ * @packets: completed frames counter
+ * @bytes: sum of bytes of completed frames above
+ * @fragments: sum of fragments of completed S/G frames
+ * @raw: alias to access all the fields as an array
+ */
+struct libeth_xdpsq_napi_stats {
+ union {
+ struct {
+ u32 packets;
+ u32 bytes;
+ u32 fragments;
+ };
+ DECLARE_FLEX_ARRAY(u32, raw);
+ };
+};
+
+/* XDP */
+
+/*
+ * The following structures should be embedded into the driver's queue
+ * structure and passed to the libeth_xdp helpers, never used directly.
+ */
+
+/* XDPSQ sharing */
+
+/**
+ * struct libeth_xdpsq_lock - locking primitive for sharing XDPSQs
+ * @lock: spinlock for locking the queue
+ * @share: whether this particular queue is shared
+ */
+struct libeth_xdpsq_lock {
+ spinlock_t lock;
+ bool share;
+};
+
+/* XDPSQ clean-up timers */
+
+/**
+ * struct libeth_xdpsq_timer - timer for cleaning up XDPSQs w/o interrupts
+ * @xdpsq: queue this timer belongs to
+ * @lock: lock for the queue
+ * @dwork: work performing cleanups
+ *
+ * XDPSQs that do not use interrupts but only lazy cleaning, i.e. clean only
+ * when there is no space left for sending the currently queued frame/bulk,
+ * must fire up timers to make sure there are no stale buffers left to free.
+ */
+struct libeth_xdpsq_timer {
+ void *xdpsq;
+ struct libeth_xdpsq_lock *lock;
+
+ struct delayed_work dwork;
+};
+
+/* Rx polling path */
+
+/**
+ * struct libeth_xdp_buff_stash - struct for stashing &xdp_buff onto a queue
+ * @data: pointer to the start of the frame, xdp_buff.data
+ * @headroom: frame headroom, xdp_buff.data - xdp_buff.data_hard_start
+ * @len: frame linear space length, xdp_buff.data_end - xdp_buff.data
+ * @frame_sz: truesize occupied by the frame, xdp_buff.frame_sz
+ * @flags: xdp_buff.flags
+ *
+ * &xdp_buff is 56 bytes long on x64, &libeth_xdp_buff is 64 bytes. This
+ * structure carries only necessary fields to save/restore a partially built
+ * frame on the queue structure to finish it during the next NAPI poll.
+ */
+struct libeth_xdp_buff_stash {
+ void *data;
+ u16 headroom;
+ u16 len;
+
+ u32 frame_sz:24;
+ u32 flags:8;
+} __aligned_largest;
+
#endif /* __LIBETH_TYPES_H */
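Purely illustrative: how the types declared above might be embedded into driver queue structures (both struct names are made up), in line with the "should be embedded, never used directly" note.

struct example_driver_xdpsq {
	/* ... HW descriptor ring, tail register, libeth_sqe array ... */

	struct libeth_xdpsq_lock lock;		/* XDPSQ sharing */
	struct libeth_xdpsq_timer *timer;	/* lazy clean-up timer */
	struct libeth_xdpsq_napi_stats xss;	/* completion counters */
};

struct example_driver_rxq {
	/* ... fill queue, page pool, NAPI context ... */

	struct libeth_xdp_buff_stash stash;	/* partially built frame */
	struct libeth_rq_napi_stats rss;	/* Rx polling counters */
};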
diff --git a/include/net/libeth/xdp.h b/include/net/libeth/xdp.h
new file mode 100644
index 000000000000..f4880b50e804
--- /dev/null
+++ b/include/net/libeth/xdp.h
@@ -0,0 +1,1879 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2025 Intel Corporation */
+
+#ifndef __LIBETH_XDP_H
+#define __LIBETH_XDP_H
+
+#include <linux/bpf_trace.h>
+#include <linux/unroll.h>
+
+#include <net/libeth/rx.h>
+#include <net/libeth/tx.h>
+#include <net/xsk_buff_pool.h>
+
+/*
+ * Defined as bits to be able to use them as a mask on Rx.
+ * Also used as internal return values on Tx.
+ */
+enum {
+ LIBETH_XDP_PASS = 0U,
+ LIBETH_XDP_DROP = BIT(0),
+ LIBETH_XDP_ABORTED = BIT(1),
+ LIBETH_XDP_TX = BIT(2),
+ LIBETH_XDP_REDIRECT = BIT(3),
+};
+
+/*
+ * &xdp_buff_xsk is the largest structure &libeth_xdp_buff gets cast to,
+ * pick maximum pointer-compatible alignment.
+ */
+#define __LIBETH_XDP_BUFF_ALIGN \
+ (IS_ALIGNED(sizeof(struct xdp_buff_xsk), 16) ? 16 : \
+ IS_ALIGNED(sizeof(struct xdp_buff_xsk), 8) ? 8 : \
+ sizeof(long))
+
+/**
+ * struct libeth_xdp_buff - libeth extension over &xdp_buff
+ * @base: main &xdp_buff
+ * @data: shortcut for @base.data
+ * @desc: RQ descriptor containing metadata for this buffer
+ * @priv: driver-private scratchspace
+ *
+ * The main reason for this is to have a pointer to the descriptor to be able
+ * to quickly get frame metadata from xdpmo and driver buff-to-xdp callbacks
+ * (as well as bigger alignment).
+ * Pointer/layout-compatible with &xdp_buff and &xdp_buff_xsk.
+ */
+struct libeth_xdp_buff {
+ union {
+ struct xdp_buff base;
+ void *data;
+ };
+
+ const void *desc;
+ unsigned long priv[]
+ __aligned(__LIBETH_XDP_BUFF_ALIGN);
+} __aligned(__LIBETH_XDP_BUFF_ALIGN);
+static_assert(offsetof(struct libeth_xdp_buff, data) ==
+ offsetof(struct xdp_buff_xsk, xdp.data));
+static_assert(offsetof(struct libeth_xdp_buff, desc) ==
+ offsetof(struct xdp_buff_xsk, cb));
+static_assert(IS_ALIGNED(sizeof(struct xdp_buff_xsk),
+ __alignof(struct libeth_xdp_buff)));
+
+/**
+ * __LIBETH_XDP_ONSTACK_BUFF - declare a &libeth_xdp_buff on the stack
+ * @name: name of the variable to declare
+ * @...: sizeof() of the driver-private data
+ */
+#define __LIBETH_XDP_ONSTACK_BUFF(name, ...) \
+ ___LIBETH_XDP_ONSTACK_BUFF(name, ##__VA_ARGS__)
+/**
+ * LIBETH_XDP_ONSTACK_BUFF - declare a &libeth_xdp_buff on the stack
+ * @name: name of the variable to declare
+ * @...: type or variable name of the driver-private data
+ */
+#define LIBETH_XDP_ONSTACK_BUFF(name, ...) \
+ __LIBETH_XDP_ONSTACK_BUFF(name, __libeth_xdp_priv_sz(__VA_ARGS__))
+
+#define ___LIBETH_XDP_ONSTACK_BUFF(name, ...) \
+ __DEFINE_FLEX(struct libeth_xdp_buff, name, priv, \
+ LIBETH_XDP_PRIV_SZ(__VA_ARGS__ + 0), \
+ __uninitialized); \
+ LIBETH_XDP_ASSERT_PRIV_SZ(__VA_ARGS__ + 0)
+
+#define __libeth_xdp_priv_sz(...) \
+ CONCATENATE(__libeth_xdp_psz, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)
+
+#define __libeth_xdp_psz0(...)
+#define __libeth_xdp_psz1(...) sizeof(__VA_ARGS__)
+
+#define LIBETH_XDP_PRIV_SZ(sz) \
+ (ALIGN(sz, __alignof(struct libeth_xdp_buff)) / sizeof(long))
+
+/* Performs XSK_CHECK_PRIV_TYPE() */
+#define LIBETH_XDP_ASSERT_PRIV_SZ(sz) \
+ static_assert(offsetofend(struct xdp_buff_xsk, cb) >= \
+ struct_size_t(struct libeth_xdp_buff, priv, \
+ LIBETH_XDP_PRIV_SZ(sz)))
+
+/* XDPSQ sharing */
+
+DECLARE_STATIC_KEY_FALSE(libeth_xdpsq_share);
+
+/**
+ * libeth_xdpsq_num - calculate optimal number of XDPSQs for this device + sys
+ * @rxq: current number of active Rx queues
+ * @txq: current number of active Tx queues
+ * @max: maximum number of Tx queues
+ *
+ * Each RQ must have its own XDPSQ for XSk pairs, and each CPU must have its
+ * own XDPSQ for lockless sending (``XDP_TX``, .ndo_xdp_xmit()). Cap the
+ * maximum of these two with the number of SQs the device can have (minus
+ * the ones already in use).
+ *
+ * Return: number of XDP Tx queues the device needs to use.
+ */
+static inline u32 libeth_xdpsq_num(u32 rxq, u32 txq, u32 max)
+{
+ return min(max(nr_cpu_ids, rxq), max - txq);
+}
+
+/**
+ * libeth_xdpsq_shared - whether XDPSQs can be shared between several CPUs
+ * @num: number of active XDPSQs
+ *
+ * Return: true if there's no 1:1 XDPSQ/CPU association, false otherwise.
+ */
+static inline bool libeth_xdpsq_shared(u32 num)
+{
+ return num < nr_cpu_ids;
+}
+
+/**
+ * libeth_xdpsq_id - get XDPSQ index corresponding to this CPU
+ * @num: number of active XDPSQs
+ *
+ * Helper for libeth_xdp routines, do not use in drivers directly.
+ *
+ * Return: XDPSQ index to be used on this CPU.
+ */
+static inline u32 libeth_xdpsq_id(u32 num)
+{
+ u32 ret = raw_smp_processor_id();
+
+ if (static_branch_unlikely(&libeth_xdpsq_share) &&
+ libeth_xdpsq_shared(num))
+ ret %= num;
+
+ return ret;
+}
+
+void __libeth_xdpsq_get(struct libeth_xdpsq_lock *lock,
+ const struct net_device *dev);
+void __libeth_xdpsq_put(struct libeth_xdpsq_lock *lock,
+ const struct net_device *dev);
+
+/**
+ * libeth_xdpsq_get - initialize &libeth_xdpsq_lock
+ * @lock: lock to initialize
+ * @dev: netdev which this lock belongs to
+ * @share: whether XDPSQs can be shared
+ *
+ * Tracks the current XDPSQ association and enables the static lock
+ * if needed.
+ */
+static inline void libeth_xdpsq_get(struct libeth_xdpsq_lock *lock,
+ const struct net_device *dev,
+ bool share)
+{
+ if (unlikely(share))
+ __libeth_xdpsq_get(lock, dev);
+}
+
+/**
+ * libeth_xdpsq_put - deinitialize &libeth_xdpsq_lock
+ * @lock: lock to deinitialize
+ * @dev: netdev which this lock belongs to
+ *
+ * Tracks the current XDPSQ association and disables the static lock
+ * if needed.
+ */
+static inline void libeth_xdpsq_put(struct libeth_xdpsq_lock *lock,
+ const struct net_device *dev)
+{
+ if (static_branch_unlikely(&libeth_xdpsq_share) && lock->share)
+ __libeth_xdpsq_put(lock, dev);
+}
+
+void __libeth_xdpsq_lock(struct libeth_xdpsq_lock *lock);
+void __libeth_xdpsq_unlock(struct libeth_xdpsq_lock *lock);
+
+/**
+ * libeth_xdpsq_lock - grab &libeth_xdpsq_lock if needed
+ * @lock: lock to take
+ *
+ * Touches the underlying spinlock only if the static key is enabled
+ * and the queue itself is marked as shareable.
+ */
+static inline void libeth_xdpsq_lock(struct libeth_xdpsq_lock *lock)
+{
+ if (static_branch_unlikely(&libeth_xdpsq_share) && lock->share)
+ __libeth_xdpsq_lock(lock);
+}
+
+/**
+ * libeth_xdpsq_unlock - free &libeth_xdpsq_lock if needed
+ * @lock: lock to free
+ *
+ * Touches the underlying spinlock only if the static key is enabled
+ * and the queue itself is marked as shareable.
+ */
+static inline void libeth_xdpsq_unlock(struct libeth_xdpsq_lock *lock)
+{
+ if (static_branch_unlikely(&libeth_xdpsq_share) && lock->share)
+ __libeth_xdpsq_unlock(lock);
+}
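A worked example with illustrative numbers: on a 16-CPU system with 8 Rx queues, 8 Tx queues and a 20-SQ device limit, libeth_xdpsq_num(8, 8, 20) yields min(max(16, 8), 20 - 8) = 12, and libeth_xdpsq_shared(12) is true, so the queues need locking. A hypothetical setup helper tying the pieces together:

static void example_xdpsq_setup(struct libeth_xdpsq_lock *lock,
				const struct net_device *dev,
				u32 rxq, u32 txq, u32 max_sq)
{
	u32 num = libeth_xdpsq_num(rxq, txq, max_sq);

	/* enables the libeth_xdpsq_share static key and the per-queue
	 * spinlock only when there are fewer XDPSQs than CPUs */
	libeth_xdpsq_get(lock, dev, libeth_xdpsq_shared(num));
}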
+
+/* XDPSQ clean-up timers */
+
+void libeth_xdpsq_init_timer(struct libeth_xdpsq_timer *timer, void *xdpsq,
+ struct libeth_xdpsq_lock *lock,
+ void (*poll)(struct work_struct *work));
+
+/**
+ * libeth_xdpsq_deinit_timer - deinitialize &libeth_xdpsq_timer
+ * @timer: timer to deinitialize
+ *
+ * Flush and disable the underlying workqueue.
+ */
+static inline void libeth_xdpsq_deinit_timer(struct libeth_xdpsq_timer *timer)
+{
+ cancel_delayed_work_sync(&timer->dwork);
+}
+
+/**
+ * libeth_xdpsq_queue_timer - run &libeth_xdpsq_timer
+ * @timer: timer to queue
+ *
+ * Should be called after the queue was filled and the transmission was run
+ * to complete the pending buffers if no further sending will be done in a
+ * second (-> lazy cleaning won't happen).
+ * If the timer was already run, it will be requeued back to one second
+ * timeout again.
+ */
+static inline void libeth_xdpsq_queue_timer(struct libeth_xdpsq_timer *timer)
+{
+ mod_delayed_work_on(raw_smp_processor_id(), system_bh_highpri_wq,
+ &timer->dwork, HZ);
+}
+
+/**
+ * libeth_xdpsq_run_timer - wrapper to run a queue clean-up on a timer event
+ * @work: workqueue belonging to the corresponding timer
+ * @poll: driver-specific completion queue poll function
+ *
+ * Run the polling function on the locked queue and requeue the timer if
+ * there's more work to do.
+ * Designed to be used via LIBETH_XDP_DEFINE_TIMER() below.
+ */
+static __always_inline void
+libeth_xdpsq_run_timer(struct work_struct *work,
+ u32 (*poll)(void *xdpsq, u32 budget))
+{
+ struct libeth_xdpsq_timer *timer = container_of(work, typeof(*timer),
+ dwork.work);
+
+ libeth_xdpsq_lock(timer->lock);
+
+ if (poll(timer->xdpsq, U32_MAX))
+ libeth_xdpsq_queue_timer(timer);
+
+ libeth_xdpsq_unlock(timer->lock);
+}
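A sketch of wiring the clean-up timer; the poll function is a made-up driver completion routine with the u32 (*)(void *, u32) shape expected above. At queue init time the driver would pass the work handler to libeth_xdpsq_init_timer(), and call libeth_xdpsq_queue_timer() after each transmission burst.

static u32 example_poll_xdpsq(void *xdpsq, u32 budget)
{
	/* a real driver completes up to 'budget' descriptors here and
	 * returns how many are still pending */
	return 0;
}

static void example_xdpsq_clean_work(struct work_struct *work)
{
	libeth_xdpsq_run_timer(work, example_poll_xdpsq);
}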
+
+/* Common Tx bits */
+
+/**
+ * enum - libeth_xdp internal Tx flags
+ * @LIBETH_XDP_TX_BULK: size of one Tx bulk, at which it gets flushed to the queue
+ * @LIBETH_XDP_TX_BATCH: batch size for which the queue fill loop is unrolled
+ * @LIBETH_XDP_TX_DROP: indicates the send function must drop frames not sent
+ * @LIBETH_XDP_TX_NDO: whether the send function is called from .ndo_xdp_xmit()
+ * @LIBETH_XDP_TX_XSK: whether the function is called for ``XDP_TX`` for XSk
+ */
+enum {
+ LIBETH_XDP_TX_BULK = DEV_MAP_BULK_SIZE,
+ LIBETH_XDP_TX_BATCH = 8,
+
+ LIBETH_XDP_TX_DROP = BIT(0),
+ LIBETH_XDP_TX_NDO = BIT(1),
+ LIBETH_XDP_TX_XSK = BIT(2),
+};
+
+/**
+ * enum - &libeth_xdp_tx_frame and &libeth_xdp_tx_desc flags
+ * @LIBETH_XDP_TX_LEN: only for ``XDP_TX``, [15:0] of ::len_fl is actual length
+ * @LIBETH_XDP_TX_CSUM: for XSk xmit, enable checksum offload
+ * @LIBETH_XDP_TX_XSKMD: for XSk xmit, mask of the metadata bits
+ * @LIBETH_XDP_TX_FIRST: indicates the frag is the first one of the frame
+ * @LIBETH_XDP_TX_LAST: whether the frag is the last one of the frame
+ * @LIBETH_XDP_TX_MULTI: whether the frame contains several frags
+ * @LIBETH_XDP_TX_FLAGS: only for ``XDP_TX``, [31:16] of ::len_fl is flags
+ */
+enum {
+ LIBETH_XDP_TX_LEN = GENMASK(15, 0),
+
+ LIBETH_XDP_TX_CSUM = XDP_TXMD_FLAGS_CHECKSUM,
+ LIBETH_XDP_TX_XSKMD = LIBETH_XDP_TX_LEN,
+
+ LIBETH_XDP_TX_FIRST = BIT(16),
+ LIBETH_XDP_TX_LAST = BIT(17),
+ LIBETH_XDP_TX_MULTI = BIT(18),
+
+ LIBETH_XDP_TX_FLAGS = GENMASK(31, 16),
+};
+
+/**
+ * struct libeth_xdp_tx_frame - represents one XDP Tx element
+ * @data: frame start pointer for ``XDP_TX``
+ * @len_fl: ``XDP_TX``, combined flags [31:16] and len [15:0] field for speed
+ * @soff: ``XDP_TX``, offset from @data to the start of &skb_shared_info
+ * @frag: one (non-head) frag for ``XDP_TX``
+ * @xdpf: &xdp_frame for the head frag for .ndo_xdp_xmit()
+ * @dma: DMA address of the non-head frag for .ndo_xdp_xmit()
+ * @xsk: ``XDP_TX`` for XSk, XDP buffer for any frag
+ * @len: frag length for XSk ``XDP_TX`` and .ndo_xdp_xmit()
+ * @flags: Tx flags for the above
+ * @opts: combined @len + @flags for the above for speed
+ * @desc: XSk xmit descriptor for direct casting
+ */
+struct libeth_xdp_tx_frame {
+ union {
+ /* ``XDP_TX`` */
+ struct {
+ void *data;
+ u32 len_fl;
+ u32 soff;
+ };
+
+ /* ``XDP_TX`` frag */
+ skb_frag_t frag;
+
+ /* .ndo_xdp_xmit(), XSk ``XDP_TX`` */
+ struct {
+ union {
+ struct xdp_frame *xdpf;
+ dma_addr_t dma;
+
+ struct libeth_xdp_buff *xsk;
+ };
+ union {
+ struct {
+ u32 len;
+ u32 flags;
+ };
+ aligned_u64 opts;
+ };
+ };
+
+ /* XSk xmit */
+ struct xdp_desc desc;
+ };
+} __aligned(sizeof(struct xdp_desc));
+static_assert(offsetof(struct libeth_xdp_tx_frame, frag.len) ==
+ offsetof(struct libeth_xdp_tx_frame, len_fl));
+static_assert(sizeof(struct libeth_xdp_tx_frame) == sizeof(struct xdp_desc));
+
+/**
+ * struct libeth_xdp_tx_bulk - XDP Tx frame bulk for bulk sending
+ * @prog: corresponding active XDP program, %NULL for .ndo_xdp_xmit()
+ * @dev: &net_device which the frames are transmitted on
+ * @xdpsq: shortcut to the corresponding driver-specific XDPSQ structure
+ * @act_mask: Rx only, mask of all the XDP prog verdicts for that NAPI session
+ * @count: current number of frames in @bulk
+ * @bulk: array of queued frames for bulk Tx
+ *
+ * All XDP Tx operations except XSk xmit queue each frame to the bulk first
+ * and flush it when @count reaches the array end. The bulk is always placed
+ * on the stack for performance. One bulk element contains all the data
+ * necessary for sending a frame and then freeing it on completion.
+ * For XSk xmit, the Tx descriptor array from &xsk_buff_pool is cast directly
+ * to &libeth_xdp_tx_frame as they are compatible and the bulk structure is
+ * not used.
+ */
+struct libeth_xdp_tx_bulk {
+ const struct bpf_prog *prog;
+ struct net_device *dev;
+ void *xdpsq;
+
+ u32 act_mask;
+ u32 count;
+ struct libeth_xdp_tx_frame bulk[LIBETH_XDP_TX_BULK];
+} __aligned(sizeof(struct libeth_xdp_tx_frame));
+
+/**
+ * LIBETH_XDP_ONSTACK_BULK - declare &libeth_xdp_tx_bulk on the stack
+ * @bq: name of the variable to declare
+ *
+ * Helper to declare a bulk on the stack with a compiler hint that it should
+ * not be initialized automatically (with `CONFIG_INIT_STACK_ALL_*`) for
+ * performance reasons.
+ */
+#define LIBETH_XDP_ONSTACK_BULK(bq) \
+ struct libeth_xdp_tx_bulk bq __uninitialized
+
+/**
+ * struct libeth_xdpsq - abstraction for an XDPSQ
+ * @pool: XSk buffer pool for XSk ``XDP_TX`` and xmit
+ * @sqes: array of Tx buffers from the actual queue struct
+ * @descs: opaque pointer to the HW descriptor array
+ * @ntu: pointer to the next free descriptor index
+ * @count: number of descriptors on that queue
+ * @pending: pointer to the number of sent-not-completed descs on that queue
+ * @xdp_tx: pointer to the above, but only for non-XSk-xmit frames
+ * @lock: corresponding XDPSQ lock
+ *
+ * Abstraction for driver-independent implementation of Tx. Placed on the stack
+ * and filled by the driver before the transmission, so that the generic
+ * functions can access and modify driver-specific resources.
+ */
+struct libeth_xdpsq {
+ struct xsk_buff_pool *pool;
+ struct libeth_sqe *sqes;
+ void *descs;
+
+ u32 *ntu;
+ u32 count;
+
+ u32 *pending;
+ u32 *xdp_tx;
+ struct libeth_xdpsq_lock *lock;
+};
+
+/**
+ * struct libeth_xdp_tx_desc - abstraction for an XDP Tx descriptor
+ * @addr: DMA address of the frame
+ * @len: length of the frame
+ * @flags: XDP Tx flags
+ * @opts: combined @len + @flags for speed
+ *
+ * Filled by the generic functions and then passed to driver-specific functions
+ * to fill a HW Tx descriptor, always placed on the [function] stack.
+ */
+struct libeth_xdp_tx_desc {
+ dma_addr_t addr;
+ union {
+ struct {
+ u32 len;
+ u32 flags;
+ };
+ aligned_u64 opts;
+ };
+} __aligned_largest;
+
+/**
+ * libeth_xdp_ptr_to_priv - convert pointer to a libeth_xdp u64 priv
+ * @ptr: pointer to convert
+ *
+ * The main sending function passes private data as the largest scalar, u64.
+ * Use this helper when you want to pass a pointer there.
+ */
+#define libeth_xdp_ptr_to_priv(ptr) ({ \
+ typecheck_pointer(ptr); \
+ ((u64)(uintptr_t)(ptr)); \
+})
+/**
+ * libeth_xdp_priv_to_ptr - convert libeth_xdp u64 priv to a pointer
+ * @priv: private data to convert
+ *
+ * The main sending function passes private data as the largest scalar, u64.
+ * Use this helper when your callback takes this u64 and you want to convert
+ * it back to a pointer.
+ */
+#define libeth_xdp_priv_to_ptr(priv) ({ \
+ static_assert(__same_type(priv, u64)); \
+ ((const void *)(uintptr_t)(priv)); \
+})
+
+/*
+ * On 64-bit systems, assigning one u64 is faster than two u32s. When ::len
+ * occupies the lowest 32 bits (LE), the whole ::opts can be assigned directly.
+ */
+#ifdef __LITTLE_ENDIAN
+#define __LIBETH_WORD_ACCESS 1
+#endif
+#ifdef __LIBETH_WORD_ACCESS
+#define __libeth_xdp_tx_len(flen, ...) \
+ .opts = ((flen) | FIELD_PREP(GENMASK_ULL(63, 32), (__VA_ARGS__ + 0)))
+#else
+#define __libeth_xdp_tx_len(flen, ...) \
+ .len = (flen), .flags = (__VA_ARGS__ + 0)
+#endif
+
+/**
+ * libeth_xdp_tx_xmit_bulk - main XDP Tx function
+ * @bulk: array of frames to send
+ * @xdpsq: pointer to the driver-specific XDPSQ struct
+ * @n: number of frames to send
+ * @unroll: whether to unroll the queue filling loop for speed
+ * @priv: driver-specific private data
+ * @prep: callback for cleaning the queue and filling abstract &libeth_xdpsq
+ * @fill: internal callback for filling &libeth_sqe and &libeth_xdp_tx_desc
+ * @xmit: callback for filling a HW descriptor with the frame info
+ *
+ * Internal abstraction for placing @n XDP Tx frames on the HW XDPSQ. Used for
+ * all types of frames: ``XDP_TX``, .ndo_xdp_xmit(), XSk ``XDP_TX``, and XSk
+ * xmit.
+ * @prep must lock the queue as this function releases it at the end. @unroll
+ * greatly increases the object code size, but also greatly increases XSk xmit
+ * performance; for other types of frames, it's not enabled.
+ * The compilers inline all those onstack abstractions to direct data accesses.
+ *
+ * Return: number of frames actually placed on the queue, <= @n. The function
+ * can't fail, but can send fewer frames if there are not enough free descriptors
+ * available. The actual free space is returned by @prep from the driver.
+ */
+static __always_inline u32
+libeth_xdp_tx_xmit_bulk(const struct libeth_xdp_tx_frame *bulk, void *xdpsq,
+ u32 n, bool unroll, u64 priv,
+ u32 (*prep)(void *xdpsq, struct libeth_xdpsq *sq),
+ struct libeth_xdp_tx_desc
+ (*fill)(struct libeth_xdp_tx_frame frm, u32 i,
+ const struct libeth_xdpsq *sq, u64 priv),
+ void (*xmit)(struct libeth_xdp_tx_desc desc, u32 i,
+ const struct libeth_xdpsq *sq, u64 priv))
+{
+ struct libeth_xdpsq sq __uninitialized;
+ u32 this, batched, off = 0;
+ u32 ntu, i = 0;
+
+ n = min(n, prep(xdpsq, &sq));
+ if (unlikely(!n))
+ goto unlock;
+
+ ntu = *sq.ntu;
+
+ this = sq.count - ntu;
+ if (likely(this > n))
+ this = n;
+
+again:
+ if (!unroll)
+ goto linear;
+
+ batched = ALIGN_DOWN(this, LIBETH_XDP_TX_BATCH);
+
+ for ( ; i < off + batched; i += LIBETH_XDP_TX_BATCH) {
+ u32 base = ntu + i - off;
+
+ unrolled_count(LIBETH_XDP_TX_BATCH)
+ for (u32 j = 0; j < LIBETH_XDP_TX_BATCH; j++)
+ xmit(fill(bulk[i + j], base + j, &sq, priv),
+ base + j, &sq, priv);
+ }
+
+ if (batched < this) {
+linear:
+ for ( ; i < off + this; i++)
+ xmit(fill(bulk[i], ntu + i - off, &sq, priv),
+ ntu + i - off, &sq, priv);
+ }
+
+ ntu += this;
+ if (likely(ntu < sq.count))
+ goto out;
+
+ ntu = 0;
+
+ if (i < n) {
+ this = n - i;
+ off = i;
+
+ goto again;
+ }
+
+out:
+ *sq.ntu = ntu;
+ *sq.pending += n;
+ if (sq.xdp_tx)
+ *sq.xdp_tx += n;
+
+unlock:
+ libeth_xdpsq_unlock(sq.lock);
+
+ return n;
+}
+
+/* ``XDP_TX`` bulking */
+
+void libeth_xdp_return_buff_slow(struct libeth_xdp_buff *xdp);
+
+/**
+ * libeth_xdp_tx_queue_head - internal helper for queueing one ``XDP_TX`` head
+ * @bq: XDP Tx bulk to queue the head frag to
+ * @xdp: XDP buffer with the head to queue
+ *
+ * Return: false if it's the only frag of the frame, true if it's an S/G frame.
+ */
+static inline bool libeth_xdp_tx_queue_head(struct libeth_xdp_tx_bulk *bq,
+ const struct libeth_xdp_buff *xdp)
+{
+ const struct xdp_buff *base = &xdp->base;
+
+ bq->bulk[bq->count++] = (typeof(*bq->bulk)){
+ .data = xdp->data,
+ .len_fl = (base->data_end - xdp->data) | LIBETH_XDP_TX_FIRST,
+ .soff = xdp_data_hard_end(base) - xdp->data,
+ };
+
+ if (!xdp_buff_has_frags(base))
+ return false;
+
+ bq->bulk[bq->count - 1].len_fl |= LIBETH_XDP_TX_MULTI;
+
+ return true;
+}
+
+/**
+ * libeth_xdp_tx_queue_frag - internal helper for queueing one ``XDP_TX`` frag
+ * @bq: XDP Tx bulk to queue the frag to
+ * @frag: frag to queue
+ */
+static inline void libeth_xdp_tx_queue_frag(struct libeth_xdp_tx_bulk *bq,
+ const skb_frag_t *frag)
+{
+ bq->bulk[bq->count++].frag = *frag;
+}
+
+/**
+ * libeth_xdp_tx_queue_bulk - internal helper for queueing one ``XDP_TX`` frame
+ * @bq: XDP Tx bulk to queue the frame to
+ * @xdp: XDP buffer to queue
+ * @flush_bulk: driver callback to flush the bulk to the HW queue
+ *
+ * Return: true on success, false on flush error.
+ */
+static __always_inline bool
+libeth_xdp_tx_queue_bulk(struct libeth_xdp_tx_bulk *bq,
+ struct libeth_xdp_buff *xdp,
+ bool (*flush_bulk)(struct libeth_xdp_tx_bulk *bq,
+ u32 flags))
+{
+ const struct skb_shared_info *sinfo;
+ bool ret = true;
+ u32 nr_frags;
+
+ if (unlikely(bq->count == LIBETH_XDP_TX_BULK) &&
+ unlikely(!flush_bulk(bq, 0))) {
+ libeth_xdp_return_buff_slow(xdp);
+ return false;
+ }
+
+ if (!libeth_xdp_tx_queue_head(bq, xdp))
+ goto out;
+
+ sinfo = xdp_get_shared_info_from_buff(&xdp->base);
+ nr_frags = sinfo->nr_frags;
+
+ for (u32 i = 0; i < nr_frags; i++) {
+ if (unlikely(bq->count == LIBETH_XDP_TX_BULK) &&
+ unlikely(!flush_bulk(bq, 0))) {
+ ret = false;
+ break;
+ }
+
+ libeth_xdp_tx_queue_frag(bq, &sinfo->frags[i]);
+ }
+
+out:
+ bq->bulk[bq->count - 1].len_fl |= LIBETH_XDP_TX_LAST;
+ xdp->data = NULL;
+
+ return ret;
+}
+
+/**
+ * libeth_xdp_tx_fill_stats - fill &libeth_sqe with ``XDP_TX`` frame stats
+ * @sqe: SQ element to fill
+ * @desc: libeth_xdp Tx descriptor
+ * @sinfo: &skb_shared_info for this frame
+ *
+ * Internal helper for filling an SQE with the frame stats, do not use in
+ * drivers. Fills the number of frags and bytes for this frame.
+ */
+#define libeth_xdp_tx_fill_stats(sqe, desc, sinfo) \
+ __libeth_xdp_tx_fill_stats(sqe, desc, sinfo, __UNIQUE_ID(sqe_), \
+ __UNIQUE_ID(desc_), __UNIQUE_ID(sinfo_))
+
+#define __libeth_xdp_tx_fill_stats(sqe, desc, sinfo, ue, ud, us) do { \
+ const struct libeth_xdp_tx_desc *ud = (desc); \
+ const struct skb_shared_info *us; \
+ struct libeth_sqe *ue = (sqe); \
+ \
+ ue->nr_frags = 1; \
+ ue->bytes = ud->len; \
+ \
+ if (ud->flags & LIBETH_XDP_TX_MULTI) { \
+ us = (sinfo); \
+ ue->nr_frags += us->nr_frags; \
+ ue->bytes += us->xdp_frags_size; \
+ } \
+} while (0)
+
+/**
+ * libeth_xdp_tx_fill_buf - internal helper to fill one ``XDP_TX`` &libeth_sqe
+ * @frm: XDP Tx frame from the bulk
+ * @i: index on the HW queue
+ * @sq: XDPSQ abstraction for the queue
+ * @priv: private data
+ *
+ * Return: XDP Tx descriptor with the synced DMA and other info to pass to
+ * the driver callback.
+ */
+static inline struct libeth_xdp_tx_desc
+libeth_xdp_tx_fill_buf(struct libeth_xdp_tx_frame frm, u32 i,
+ const struct libeth_xdpsq *sq, u64 priv)
+{
+ struct libeth_xdp_tx_desc desc;
+ struct skb_shared_info *sinfo;
+ skb_frag_t *frag = &frm.frag;
+ struct libeth_sqe *sqe;
+ netmem_ref netmem;
+
+ if (frm.len_fl & LIBETH_XDP_TX_FIRST) {
+ sinfo = frm.data + frm.soff;
+ skb_frag_fill_netmem_desc(frag, virt_to_netmem(frm.data),
+ offset_in_page(frm.data),
+ frm.len_fl);
+ } else {
+ sinfo = NULL;
+ }
+
+ netmem = skb_frag_netmem(frag);
+ desc = (typeof(desc)){
+ .addr = page_pool_get_dma_addr_netmem(netmem) +
+ skb_frag_off(frag),
+ .len = skb_frag_size(frag) & LIBETH_XDP_TX_LEN,
+ .flags = skb_frag_size(frag) & LIBETH_XDP_TX_FLAGS,
+ };
+
+ dma_sync_single_for_device(__netmem_get_pp(netmem)->p.dev, desc.addr,
+ desc.len, DMA_BIDIRECTIONAL);
+
+ if (!sinfo)
+ return desc;
+
+ sqe = &sq->sqes[i];
+ sqe->type = LIBETH_SQE_XDP_TX;
+ sqe->sinfo = sinfo;
+ libeth_xdp_tx_fill_stats(sqe, &desc, sinfo);
+
+ return desc;
+}
+
+void libeth_xdp_tx_exception(struct libeth_xdp_tx_bulk *bq, u32 sent,
+ u32 flags);
+
+/**
+ * __libeth_xdp_tx_flush_bulk - internal helper to flush one XDP Tx bulk
+ * @bq: bulk to flush
+ * @flags: XDP TX flags (.ndo_xdp_xmit(), XSk etc.)
+ * @prep: driver-specific callback to prepare the queue for sending
+ * @fill: libeth_xdp callback to fill &libeth_sqe and &libeth_xdp_tx_desc
+ * @xmit: driver callback to fill a HW descriptor
+ *
+ * Internal abstraction to create bulk flush functions for drivers. Used for
+ * everything except XSk xmit.
+ *
+ * Return: true if anything was sent, false otherwise.
+ */
+static __always_inline bool
+__libeth_xdp_tx_flush_bulk(struct libeth_xdp_tx_bulk *bq, u32 flags,
+ u32 (*prep)(void *xdpsq, struct libeth_xdpsq *sq),
+ struct libeth_xdp_tx_desc
+ (*fill)(struct libeth_xdp_tx_frame frm, u32 i,
+ const struct libeth_xdpsq *sq, u64 priv),
+ void (*xmit)(struct libeth_xdp_tx_desc desc, u32 i,
+ const struct libeth_xdpsq *sq,
+ u64 priv))
+{
+ u32 sent, drops;
+ int err = 0;
+
+ sent = libeth_xdp_tx_xmit_bulk(bq->bulk, bq->xdpsq,
+ min(bq->count, LIBETH_XDP_TX_BULK),
+ false, 0, prep, fill, xmit);
+ drops = bq->count - sent;
+
+ if (unlikely(drops)) {
+ libeth_xdp_tx_exception(bq, sent, flags);
+ err = -ENXIO;
+ } else {
+ bq->count = 0;
+ }
+
+ trace_xdp_bulk_tx(bq->dev, sent, drops, err);
+
+ return likely(sent);
+}
+
+/**
+ * libeth_xdp_tx_flush_bulk - wrapper to define flush of one ``XDP_TX`` bulk
+ * @bq: bulk to flush
+ * @flags: Tx flags, see above
+ * @prep: driver callback to prepare the queue
+ * @xmit: driver callback to fill a HW descriptor
+ *
+ * Use via LIBETH_XDP_DEFINE_FLUSH_TX() to define an ``XDP_TX`` driver
+ * callback.
+ */
+#define libeth_xdp_tx_flush_bulk(bq, flags, prep, xmit) \
+ __libeth_xdp_tx_flush_bulk(bq, flags, prep, libeth_xdp_tx_fill_buf, \
+ xmit)
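For illustration, an open-coded driver ``XDP_TX`` flush callback built from the wrapper above (a driver would normally generate it via the LIBETH_XDP_DEFINE_FLUSH_TX() helper mentioned in the kernel-doc); both driver callbacks below are stubs with the documented signatures.

static u32 example_xdp_tx_prep(void *xdpsq, struct libeth_xdpsq *sq)
{
	/* a real driver locks the queue via libeth_xdpsq_lock(), fills 'sq'
	 * from its own queue structure and returns the free space */
	return 0;
}

static void example_xdp_xmit_desc(struct libeth_xdp_tx_desc desc, u32 i,
				  const struct libeth_xdpsq *sq, u64 priv)
{
	/* a real driver writes 'desc' into HW Tx descriptor 'i' here */
}

static bool example_xdp_tx_flush(struct libeth_xdp_tx_bulk *bq, u32 flags)
{
	return libeth_xdp_tx_flush_bulk(bq, flags, example_xdp_tx_prep,
					example_xdp_xmit_desc);
}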
+
+/* .ndo_xdp_xmit() implementation */
+
+/**
+ * libeth_xdp_xmit_init_bulk - internal helper to initialize bulk for XDP xmit
+ * @bq: bulk to initialize
+ * @dev: target &net_device
+ * @xdpsqs: array of driver-specific XDPSQ structs
+ * @num: number of active XDPSQs (the above array length)
+ */
+#define libeth_xdp_xmit_init_bulk(bq, dev, xdpsqs, num) \
+ __libeth_xdp_xmit_init_bulk(bq, dev, (xdpsqs)[libeth_xdpsq_id(num)])
+
+static inline void __libeth_xdp_xmit_init_bulk(struct libeth_xdp_tx_bulk *bq,
+ struct net_device *dev,
+ void *xdpsq)
+{
+ bq->dev = dev;
+ bq->xdpsq = xdpsq;
+ bq->count = 0;
+}
+
+/**
+ * libeth_xdp_xmit_frame_dma - internal helper to access DMA of an &xdp_frame
+ * @xf: pointer to the XDP frame
+ *
+ * There's no place in &libeth_xdp_tx_frame to store the DMA address for an
+ * &xdp_frame head. The headroom is used instead: the address is placed right
+ * after the frame struct, naturally aligned.
+ *
+ * Return: pointer to the DMA address to use.
+ */
+#define libeth_xdp_xmit_frame_dma(xf) \
+ _Generic((xf), \
+ const struct xdp_frame *: \
+ (const dma_addr_t *)__libeth_xdp_xmit_frame_dma(xf), \
+ struct xdp_frame *: \
+ (dma_addr_t *)__libeth_xdp_xmit_frame_dma(xf) \
+ )
+
+static inline void *__libeth_xdp_xmit_frame_dma(const struct xdp_frame *xdpf)
+{
+ void *addr = (void *)(xdpf + 1);
+
+ if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
+ __alignof(*xdpf) < sizeof(dma_addr_t))
+ addr = PTR_ALIGN(addr, sizeof(dma_addr_t));
+
+ return addr;
+}
+
+/**
+ * libeth_xdp_xmit_queue_head - internal helper for queueing one XDP xmit head
+ * @bq: XDP Tx bulk to queue the head frag to
+ * @xdpf: XDP frame with the head to queue
+ * @dev: device to perform DMA mapping
+ *
+ * Return: ``LIBETH_XDP_DROP`` on DMA mapping error,
+ * ``LIBETH_XDP_PASS`` if it's the only frag in the frame,
+ * ``LIBETH_XDP_TX`` if it's an S/G frame.
+ */
+static inline u32 libeth_xdp_xmit_queue_head(struct libeth_xdp_tx_bulk *bq,
+ struct xdp_frame *xdpf,
+ struct device *dev)
+{
+ dma_addr_t dma;
+
+ dma = dma_map_single(dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ return LIBETH_XDP_DROP;
+
+ *libeth_xdp_xmit_frame_dma(xdpf) = dma;
+
+ bq->bulk[bq->count++] = (typeof(*bq->bulk)){
+ .xdpf = xdpf,
+ __libeth_xdp_tx_len(xdpf->len, LIBETH_XDP_TX_FIRST),
+ };
+
+ if (!xdp_frame_has_frags(xdpf))
+ return LIBETH_XDP_PASS;
+
+ bq->bulk[bq->count - 1].flags |= LIBETH_XDP_TX_MULTI;
+
+ return LIBETH_XDP_TX;
+}
+
+/**
+ * libeth_xdp_xmit_queue_frag - internal helper for queueing one XDP xmit frag
+ * @bq: XDP Tx bulk to queue the frag to
+ * @frag: frag to queue
+ * @dev: device to perform DMA mapping
+ *
+ * Return: true on success, false on DMA mapping error.
+ */
+static inline bool libeth_xdp_xmit_queue_frag(struct libeth_xdp_tx_bulk *bq,
+ const skb_frag_t *frag,
+ struct device *dev)
+{
+ dma_addr_t dma;
+
+ dma = skb_frag_dma_map(dev, frag);
+ if (dma_mapping_error(dev, dma))
+ return false;
+
+ bq->bulk[bq->count++] = (typeof(*bq->bulk)){
+ .dma = dma,
+ __libeth_xdp_tx_len(skb_frag_size(frag)),
+ };
+
+ return true;
+}
+
+/**
+ * libeth_xdp_xmit_queue_bulk - internal helper for queueing one XDP xmit frame
+ * @bq: XDP Tx bulk to queue the frame to
+ * @xdpf: XDP frame to queue
+ * @flush_bulk: driver callback to flush the bulk to the HW queue
+ *
+ * Return: ``LIBETH_XDP_TX`` on success,
+ * ``LIBETH_XDP_DROP`` if the frame should be dropped by the stack,
+ * ``LIBETH_XDP_ABORTED`` if the frame will be dropped by libeth_xdp.
+ */
+static __always_inline u32
+libeth_xdp_xmit_queue_bulk(struct libeth_xdp_tx_bulk *bq,
+ struct xdp_frame *xdpf,
+ bool (*flush_bulk)(struct libeth_xdp_tx_bulk *bq,
+ u32 flags))
+{
+ u32 head, nr_frags, i, ret = LIBETH_XDP_TX;
+ struct device *dev = bq->dev->dev.parent;
+ const struct skb_shared_info *sinfo;
+
+ if (unlikely(bq->count == LIBETH_XDP_TX_BULK) &&
+ unlikely(!flush_bulk(bq, LIBETH_XDP_TX_NDO)))
+ return LIBETH_XDP_DROP;
+
+ head = libeth_xdp_xmit_queue_head(bq, xdpf, dev);
+ if (head == LIBETH_XDP_PASS)
+ goto out;
+ else if (head == LIBETH_XDP_DROP)
+ return LIBETH_XDP_DROP;
+
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
+ nr_frags = sinfo->nr_frags;
+
+ for (i = 0; i < nr_frags; i++) {
+ if (unlikely(bq->count == LIBETH_XDP_TX_BULK) &&
+ unlikely(!flush_bulk(bq, LIBETH_XDP_TX_NDO)))
+ break;
+
+ if (!libeth_xdp_xmit_queue_frag(bq, &sinfo->frags[i], dev))
+ break;
+ }
+
+ if (unlikely(i < nr_frags))
+ ret = LIBETH_XDP_ABORTED;
+
+out:
+ bq->bulk[bq->count - 1].flags |= LIBETH_XDP_TX_LAST;
+
+ return ret;
+}
+
+/**
+ * libeth_xdp_xmit_fill_buf - internal helper to fill one XDP xmit &libeth_sqe
+ * @frm: XDP Tx frame from the bulk
+ * @i: index on the HW queue
+ * @sq: XDPSQ abstraction for the queue
+ * @priv: private data
+ *
+ * Return: XDP Tx descriptor with the mapped DMA and other info to pass to
+ * the driver callback.
+ */
+static inline struct libeth_xdp_tx_desc
+libeth_xdp_xmit_fill_buf(struct libeth_xdp_tx_frame frm, u32 i,
+ const struct libeth_xdpsq *sq, u64 priv)
+{
+ struct libeth_xdp_tx_desc desc;
+ struct libeth_sqe *sqe;
+ struct xdp_frame *xdpf;
+
+ if (frm.flags & LIBETH_XDP_TX_FIRST) {
+ xdpf = frm.xdpf;
+ desc.addr = *libeth_xdp_xmit_frame_dma(xdpf);
+ } else {
+ xdpf = NULL;
+ desc.addr = frm.dma;
+ }
+ desc.opts = frm.opts;
+
+ sqe = &sq->sqes[i];
+ dma_unmap_addr_set(sqe, dma, desc.addr);
+ dma_unmap_len_set(sqe, len, desc.len);
+
+ if (!xdpf) {
+ sqe->type = LIBETH_SQE_XDP_XMIT_FRAG;
+ return desc;
+ }
+
+ sqe->type = LIBETH_SQE_XDP_XMIT;
+ sqe->xdpf = xdpf;
+ libeth_xdp_tx_fill_stats(sqe, &desc,
+ xdp_get_shared_info_from_frame(xdpf));
+
+ return desc;
+}
+
+/**
+ * libeth_xdp_xmit_flush_bulk - wrapper to define flush of one XDP xmit bulk
+ * @bq: bulk to flush
+ * @flags: Tx flags, see __libeth_xdp_tx_flush_bulk()
+ * @prep: driver callback to prepare the queue
+ * @xmit: driver callback to fill a HW descriptor
+ *
+ * Use via LIBETH_XDP_DEFINE_FLUSH_XMIT() to define an XDP xmit driver
+ * callback.
+ */
+#define libeth_xdp_xmit_flush_bulk(bq, flags, prep, xmit) \
+ __libeth_xdp_tx_flush_bulk(bq, (flags) | LIBETH_XDP_TX_NDO, prep, \
+ libeth_xdp_xmit_fill_buf, xmit)
+
+u32 libeth_xdp_xmit_return_bulk(const struct libeth_xdp_tx_frame *bq,
+ u32 count, const struct net_device *dev);
+
+/**
+ * __libeth_xdp_xmit_do_bulk - internal function to implement .ndo_xdp_xmit()
+ * @bq: XDP Tx bulk to queue frames to
+ * @frames: XDP frames passed by the stack
+ * @n: number of frames
+ * @flags: flags passed by the stack
+ * @flush_bulk: driver callback to flush an XDP xmit bulk
+ * @finalize: driver callback to finalize sending XDP Tx frames on the queue
+ *
+ * Perform common checks, map the frags and queue them to the bulk, then flush
+ * the bulk to the XDPSQ. If requested by the stack, finalize the queue.
+ *
+ * Return: number of frames sent or -errno on error.
+ */
+static __always_inline int
+__libeth_xdp_xmit_do_bulk(struct libeth_xdp_tx_bulk *bq,
+ struct xdp_frame **frames, u32 n, u32 flags,
+ bool (*flush_bulk)(struct libeth_xdp_tx_bulk *bq,
+ u32 flags),
+ void (*finalize)(void *xdpsq, bool sent, bool flush))
+{
+ u32 nxmit = 0;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ for (u32 i = 0; likely(i < n); i++) {
+ u32 ret;
+
+ ret = libeth_xdp_xmit_queue_bulk(bq, frames[i], flush_bulk);
+ if (unlikely(ret != LIBETH_XDP_TX)) {
+ nxmit += ret == LIBETH_XDP_ABORTED;
+ break;
+ }
+
+ nxmit++;
+ }
+
+ if (bq->count) {
+ flush_bulk(bq, LIBETH_XDP_TX_NDO);
+ if (unlikely(bq->count))
+ nxmit -= libeth_xdp_xmit_return_bulk(bq->bulk,
+ bq->count,
+ bq->dev);
+ }
+
+ finalize(bq->xdpsq, nxmit, flags & XDP_XMIT_FLUSH);
+
+ return nxmit;
+}
+
+/**
+ * libeth_xdp_xmit_do_bulk - implement full .ndo_xdp_xmit() in driver
+ * @dev: target &net_device
+ * @n: number of frames to send
+ * @fr: XDP frames to send
+ * @f: flags passed by the stack
+ * @xqs: array of XDPSQs driver structs
+ * @nqs: number of active XDPSQs, the above array length
+ * @fl: driver callback to flush an XDP xmit bulk
+ * @fin: driver callback to finalize the queue
+ *
+ * If the driver has active XDPSQs, perform common checks and send the frames.
+ * Finalize the queue, if requested.
+ *
+ * Return: number of frames sent or -errno on error.
+ */
+#define libeth_xdp_xmit_do_bulk(dev, n, fr, f, xqs, nqs, fl, fin) \
+ _libeth_xdp_xmit_do_bulk(dev, n, fr, f, xqs, nqs, fl, fin, \
+ __UNIQUE_ID(bq_), __UNIQUE_ID(ret_), \
+ __UNIQUE_ID(nqs_))
+
+#define _libeth_xdp_xmit_do_bulk(d, n, fr, f, xqs, nqs, fl, fin, ub, ur, un) \
+({ \
+ u32 un = (nqs); \
+ int ur; \
+ \
+ if (likely(un)) { \
+ LIBETH_XDP_ONSTACK_BULK(ub); \
+ \
+ libeth_xdp_xmit_init_bulk(&ub, d, xqs, un); \
+ ur = __libeth_xdp_xmit_do_bulk(&ub, fr, n, f, fl, fin); \
+ } else { \
+ ur = -ENXIO; \
+ } \
+ \
+ ur; \
+})
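
For orientation, a full .ndo_xdp_xmit() built on top of this macro can be as small as the sketch below. The driver structure, its fields and the two callbacks (the flush one would come from LIBETH_XDP_DEFINE_FLUSH_XMIT() further down) are illustrative assumptions, not part of the API.

/* Hypothetical driver glue; struct drv_priv and its fields are assumed */
static int drv_xdp_xmit(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	struct drv_priv *priv = netdev_priv(dev);

	return libeth_xdp_xmit_do_bulk(dev, n, frames, flags,
				       priv->xdpsqs, priv->num_xdpsqs,
				       drv_xdp_flush_xmit,
				       drv_xdp_finalize_sq);
}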
+
+/* Rx polling path */
+
+/**
+ * libeth_xdp_tx_init_bulk - initialize an XDP Tx bulk for Rx NAPI poll
+ * @bq: bulk to initialize
+ * @prog: RCU pointer to the XDP program (can be %NULL)
+ * @dev: target &net_device
+ * @xdpsqs: array of driver XDPSQ structs
+ * @num: number of active XDPSQs, the above array length
+ *
+ * Should be called on an onstack XDP Tx bulk before the NAPI polling loop.
+ * Initializes all the needed fields to run libeth_xdp functions. If @num == 0,
+ * assumes XDP is not enabled.
+ * Do not use for XSk, which has its own optimized helper.
+ */
+#define libeth_xdp_tx_init_bulk(bq, prog, dev, xdpsqs, num) \
+ __libeth_xdp_tx_init_bulk(bq, prog, dev, xdpsqs, num, false, \
+ __UNIQUE_ID(bq_), __UNIQUE_ID(nqs_))
+
+#define __libeth_xdp_tx_init_bulk(bq, pr, d, xdpsqs, num, xsk, ub, un) do { \
+ typeof(bq) ub = (bq); \
+ u32 un = (num); \
+ \
+ rcu_read_lock(); \
+ \
+ if (un || (xsk)) { \
+ ub->prog = rcu_dereference(pr); \
+ ub->dev = (d); \
+ ub->xdpsq = (xdpsqs)[libeth_xdpsq_id(un)]; \
+ } else { \
+ ub->prog = NULL; \
+ } \
+ \
+ ub->act_mask = 0; \
+ ub->count = 0; \
+} while (0)
+
+void libeth_xdp_load_stash(struct libeth_xdp_buff *dst,
+ const struct libeth_xdp_buff_stash *src);
+void libeth_xdp_save_stash(struct libeth_xdp_buff_stash *dst,
+ const struct libeth_xdp_buff *src);
+void __libeth_xdp_return_stash(struct libeth_xdp_buff_stash *stash);
+
+/**
+ * libeth_xdp_init_buff - initialize a &libeth_xdp_buff for Rx NAPI poll
+ * @dst: onstack buffer to initialize
+ * @src: XDP buffer stash placed on the queue
+ * @rxq: registered &xdp_rxq_info corresponding to this queue
+ *
+ * Should be called before the main NAPI polling loop. Loads the content of
+ * the previously saved stash or initializes the buffer from scratch.
+ * Do not use for XSk.
+ */
+static inline void
+libeth_xdp_init_buff(struct libeth_xdp_buff *dst,
+ const struct libeth_xdp_buff_stash *src,
+ struct xdp_rxq_info *rxq)
+{
+ if (likely(!src->data))
+ dst->data = NULL;
+ else
+ libeth_xdp_load_stash(dst, src);
+
+ dst->base.rxq = rxq;
+}
+
+/**
+ * libeth_xdp_save_buff - save a partially built buffer on a queue
+ * @dst: XDP buffer stash placed on the queue
+ * @src: onstack buffer to save
+ *
+ * Should be called after the main NAPI polling loop. If the loop exited before
+ * the buffer was finished, saves its content on the queue, so that it can be
+ * completed during the next poll. Otherwise, clears the stash.
+ */
+static inline void libeth_xdp_save_buff(struct libeth_xdp_buff_stash *dst,
+ const struct libeth_xdp_buff *src)
+{
+ if (likely(!src->data))
+ dst->data = NULL;
+ else
+ libeth_xdp_save_stash(dst, src);
+}
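
Taken together, the init/save pair typically brackets the NAPI poll as in the hedged sketch below; the queue structure, its field names and the finalize helper (from LIBETH_XDP_DEFINE_FINALIZE() further down) are assumptions, and real drivers declare the onstack buffer with the dedicated aligned helper.

/* Poll skeleton only; drv_rxq layout and the loop body are assumed */
static int drv_napi_poll(struct napi_struct *napi, int budget)
{
	struct drv_rxq *rxq = container_of(napi, struct drv_rxq, napi);
	struct libeth_rq_napi_stats rs = { };
	LIBETH_XDP_ONSTACK_BULK(bq);
	struct libeth_xdp_buff xdp;	/* the real helper declares it aligned */
	int done = 0;

	libeth_xdp_tx_init_bulk(&bq, rxq->xdp_prog, rxq->netdev,
				rxq->xdpsqs, rxq->num_xdpsqs);
	libeth_xdp_init_buff(&xdp, &rxq->xdp_stash, &rxq->xdp_rxq);

	/* ... process up to @budget descriptors, bump @done and @rs ... */

	libeth_xdp_save_buff(&rxq->xdp_stash, &xdp);
	drv_xdp_finalize_rx(&bq);	/* also drops the RCU lock taken above */

	return done;
}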
+
+/**
+ * libeth_xdp_return_stash - free an XDP buffer stash from a queue
+ * @stash: stash to free
+ *
+ * If the queue is about to be destroyed but still has an incomplete
+ * buffer stash, this helper should be called to free it.
+ */
+static inline void libeth_xdp_return_stash(struct libeth_xdp_buff_stash *stash)
+{
+ if (stash->data)
+ __libeth_xdp_return_stash(stash);
+}
+
+static inline void libeth_xdp_return_va(const void *data, bool napi)
+{
+ netmem_ref netmem = virt_to_netmem(data);
+
+ page_pool_put_full_netmem(__netmem_get_pp(netmem), netmem, napi);
+}
+
+static inline void libeth_xdp_return_frags(const struct skb_shared_info *sinfo,
+ bool napi)
+{
+ for (u32 i = 0; i < sinfo->nr_frags; i++) {
+ netmem_ref netmem = skb_frag_netmem(&sinfo->frags[i]);
+
+ page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, napi);
+ }
+}
+
+/**
+ * libeth_xdp_return_buff - free/recycle &libeth_xdp_buff
+ * @xdp: buffer to free
+ *
+ * Hotpath helper to free &libeth_xdp_buff. Compared to xdp_return_buff(),
+ * it's faster as it gets inlined and always assumes order-0 pages and safe
+ * direct recycling. Zeroes @xdp->data to avoid UAFs.
+ */
+#define libeth_xdp_return_buff(xdp) __libeth_xdp_return_buff(xdp, true)
+
+static inline void __libeth_xdp_return_buff(struct libeth_xdp_buff *xdp,
+ bool napi)
+{
+ if (!xdp_buff_has_frags(&xdp->base))
+ goto out;
+
+ libeth_xdp_return_frags(xdp_get_shared_info_from_buff(&xdp->base),
+ napi);
+
+out:
+ libeth_xdp_return_va(xdp->data, napi);
+ xdp->data = NULL;
+}
+
+bool libeth_xdp_buff_add_frag(struct libeth_xdp_buff *xdp,
+ const struct libeth_fqe *fqe,
+ u32 len);
+
+/**
+ * libeth_xdp_prepare_buff - fill &libeth_xdp_buff with head FQE data
+ * @xdp: XDP buffer to attach the head to
+ * @fqe: FQE containing the head buffer
+ * @len: buffer len passed from HW
+ *
+ * Internal, use libeth_xdp_process_buff() instead. Initializes XDP buffer
+ * head with the Rx buffer data: data pointer, length, headroom, and
+ * truesize/tailroom. Zeroes the flags.
+ * Uses faster single u64 write instead of per-field access.
+ */
+static inline void libeth_xdp_prepare_buff(struct libeth_xdp_buff *xdp,
+ const struct libeth_fqe *fqe,
+ u32 len)
+{
+ const struct page *page = __netmem_to_page(fqe->netmem);
+
+#ifdef __LIBETH_WORD_ACCESS
+ static_assert(offsetofend(typeof(xdp->base), flags) -
+ offsetof(typeof(xdp->base), frame_sz) ==
+ sizeof(u64));
+
+ *(u64 *)&xdp->base.frame_sz = fqe->truesize;
+#else
+ xdp_init_buff(&xdp->base, fqe->truesize, xdp->base.rxq);
+#endif
+ xdp_prepare_buff(&xdp->base, page_address(page) + fqe->offset,
+ pp_page_to_nmdesc(page)->pp->p.offset, len, true);
+}
+
+/**
+ * libeth_xdp_process_buff - attach Rx buffer to &libeth_xdp_buff
+ * @xdp: XDP buffer to attach the Rx buffer to
+ * @fqe: Rx buffer to process
+ * @len: received data length from the descriptor
+ *
+ * If the XDP buffer is empty, attaches the Rx buffer as head and initializes
+ * the required fields. Otherwise, attaches the buffer as a frag.
+ * Already performs DMA sync-for-CPU and frame start prefetch
+ * (for head buffers only).
+ *
+ * Return: true on success, false if the descriptor must be skipped (empty or
+ * no space for a new frag).
+ */
+static inline bool libeth_xdp_process_buff(struct libeth_xdp_buff *xdp,
+ const struct libeth_fqe *fqe,
+ u32 len)
+{
+ if (!libeth_rx_sync_for_cpu(fqe, len))
+ return false;
+
+ if (xdp->data)
+ return libeth_xdp_buff_add_frag(xdp, fqe, len);
+
+ libeth_xdp_prepare_buff(xdp, fqe, len);
+
+ prefetch(xdp->data);
+
+ return true;
+}
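
A hedged sketch of how one Rx descriptor might be fed through this helper inside the polling loop; the descriptor layout, the EOP test and the drv_xdp_run() wrapper (as generated by LIBETH_XDP_DEFINE_RUN() below) are assumptions.

/* One Rx descriptor worth of work; all drv_* names are illustrative */
static void drv_process_rx_desc(struct drv_rxq *rxq,
				const struct drv_rx_desc *desc,
				struct libeth_xdp_buff *xdp,
				struct libeth_xdp_tx_bulk *bq,
				struct libeth_rq_napi_stats *rs)
{
	const struct libeth_fqe *fqe = &rxq->fqes[rxq->next_to_clean];
	u32 len = le16_to_cpu(desc->pkt_len);

	if (!libeth_xdp_process_buff(xdp, fqe, len))
		return;		/* empty buffer or no space for a frag */

	if (!drv_rx_desc_is_eop(desc))
		return;		/* keep collecting frags */

	/* the whole frame is collected: run XDP and/or pass an skb up */
	drv_xdp_run(xdp, bq, rxq->napi, rs, desc);
}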
+
+/**
+ * libeth_xdp_buff_stats_frags - update onstack RQ stats with XDP frags info
+ * @ss: onstack stats to update
+ * @xdp: buffer to account
+ *
+ * Internal helper used by __libeth_xdp_run_pass(), do not call directly.
+ * Adds buffer's frags count and total len to the onstack stats.
+ */
+static inline void
+libeth_xdp_buff_stats_frags(struct libeth_rq_napi_stats *ss,
+ const struct libeth_xdp_buff *xdp)
+{
+ const struct skb_shared_info *sinfo;
+
+ sinfo = xdp_get_shared_info_from_buff(&xdp->base);
+ ss->bytes += sinfo->xdp_frags_size;
+ ss->fragments += sinfo->nr_frags + 1;
+}
+
+u32 libeth_xdp_prog_exception(const struct libeth_xdp_tx_bulk *bq,
+ struct libeth_xdp_buff *xdp,
+ enum xdp_action act, int ret);
+
+/**
+ * __libeth_xdp_run_prog - run XDP program on an XDP buffer
+ * @xdp: XDP buffer to run the prog on
+ * @bq: buffer bulk for ``XDP_TX`` queueing
+ *
+ * Internal inline abstraction to run XDP program. Handles ``XDP_DROP``
+ * and ``XDP_REDIRECT`` only, the rest is processed levels up.
+ * Reports an XDP prog exception on errors.
+ *
+ * Return: libeth_xdp prog verdict depending on the prog's verdict.
+ */
+static __always_inline u32
+__libeth_xdp_run_prog(struct libeth_xdp_buff *xdp,
+ const struct libeth_xdp_tx_bulk *bq)
+{
+ enum xdp_action act;
+
+ act = bpf_prog_run_xdp(bq->prog, &xdp->base);
+ if (unlikely(act < XDP_DROP || act > XDP_REDIRECT))
+ goto out;
+
+ switch (act) {
+ case XDP_PASS:
+ return LIBETH_XDP_PASS;
+ case XDP_DROP:
+ libeth_xdp_return_buff(xdp);
+
+ return LIBETH_XDP_DROP;
+ case XDP_TX:
+ return LIBETH_XDP_TX;
+ case XDP_REDIRECT:
+ if (unlikely(xdp_do_redirect(bq->dev, &xdp->base, bq->prog)))
+ break;
+
+ xdp->data = NULL;
+
+ return LIBETH_XDP_REDIRECT;
+ default:
+ break;
+ }
+
+out:
+ return libeth_xdp_prog_exception(bq, xdp, act, 0);
+}
+
+/**
+ * __libeth_xdp_run_flush - run XDP program and handle ``XDP_TX`` verdict
+ * @xdp: XDP buffer to run the prog on
+ * @bq: buffer bulk for ``XDP_TX`` queueing
+ * @run: internal callback for running XDP program
+ * @queue: internal callback for queuing ``XDP_TX`` frame
+ * @flush_bulk: driver callback for flushing a bulk
+ *
+ * Internal inline abstraction to run XDP program and additionally handle
+ * ``XDP_TX`` verdict. Used by both XDP and XSk, hence @run and @queue.
+ * Do not use directly.
+ *
+ * Return: libeth_xdp prog verdict depending on the prog's verdict.
+ */
+static __always_inline u32
+__libeth_xdp_run_flush(struct libeth_xdp_buff *xdp,
+ struct libeth_xdp_tx_bulk *bq,
+ u32 (*run)(struct libeth_xdp_buff *xdp,
+ const struct libeth_xdp_tx_bulk *bq),
+ bool (*queue)(struct libeth_xdp_tx_bulk *bq,
+ struct libeth_xdp_buff *xdp,
+ bool (*flush_bulk)
+ (struct libeth_xdp_tx_bulk *bq,
+ u32 flags)),
+ bool (*flush_bulk)(struct libeth_xdp_tx_bulk *bq,
+ u32 flags))
+{
+ u32 act;
+
+ act = run(xdp, bq);
+ if (act == LIBETH_XDP_TX && unlikely(!queue(bq, xdp, flush_bulk)))
+ act = LIBETH_XDP_DROP;
+
+ bq->act_mask |= act;
+
+ return act;
+}
+
+/**
+ * libeth_xdp_run_prog - run XDP program (non-XSk path) and handle all verdicts
+ * @xdp: XDP buffer to process
+ * @bq: XDP Tx bulk to queue ``XDP_TX`` buffers
+ * @fl: driver ``XDP_TX`` bulk flush callback
+ *
+ * Run the attached XDP program and handle all possible verdicts. XSk has its
+ * own version.
+ * Prefer using it via LIBETH_XDP_DEFINE_RUN{,_PASS,_PROG}().
+ *
+ * Return: true if the buffer should be passed up the stack, false if the poll
+ * should go to the next buffer.
+ */
+#define libeth_xdp_run_prog(xdp, bq, fl) \
+ (__libeth_xdp_run_flush(xdp, bq, __libeth_xdp_run_prog, \
+ libeth_xdp_tx_queue_bulk, \
+ fl) == LIBETH_XDP_PASS)
+
+/**
+ * __libeth_xdp_run_pass - helper to run XDP program and handle the result
+ * @xdp: XDP buffer to process
+ * @bq: XDP Tx bulk to queue ``XDP_TX`` frames
+ * @napi: NAPI to build an skb and pass it up the stack
+ * @rs: onstack libeth RQ stats
+ * @md: metadata that should be filled to the XDP buffer
+ * @prep: callback for filling the metadata
+ * @run: driver wrapper to run XDP program
+ * @populate: driver callback to populate an skb with the HW descriptor data
+ *
+ * Inline abstraction that does the following (non-XSk path):
+ * 1) adds frame size and frag number (if needed) to the onstack stats;
+ * 2) fills the descriptor metadata to the onstack &libeth_xdp_buff;
+ * 3) runs the XDP program if present;
+ * 4) handles all possible verdicts;
+ * 5) on ``XDP_PASS``, builds an skb from the buffer;
+ * 6) populates it with the descriptor metadata;
+ * 7) passes it up the stack.
+ *
+ * In most cases, step 2 means just writing the pointer to the HW descriptor
+ * to the XDP buffer. If so, please use LIBETH_XDP_DEFINE_RUN{,_PASS}()
+ * wrappers to build a driver function.
+ */
+static __always_inline void
+__libeth_xdp_run_pass(struct libeth_xdp_buff *xdp,
+ struct libeth_xdp_tx_bulk *bq, struct napi_struct *napi,
+ struct libeth_rq_napi_stats *rs, const void *md,
+ void (*prep)(struct libeth_xdp_buff *xdp,
+ const void *md),
+ bool (*run)(struct libeth_xdp_buff *xdp,
+ struct libeth_xdp_tx_bulk *bq),
+ bool (*populate)(struct sk_buff *skb,
+ const struct libeth_xdp_buff *xdp,
+ struct libeth_rq_napi_stats *rs))
+{
+ struct sk_buff *skb;
+
+ rs->bytes += xdp->base.data_end - xdp->data;
+ rs->packets++;
+
+ if (xdp_buff_has_frags(&xdp->base))
+ libeth_xdp_buff_stats_frags(rs, xdp);
+
+ if (prep && (!__builtin_constant_p(!!md) || md))
+ prep(xdp, md);
+
+ if (!bq || !run || !bq->prog)
+ goto build;
+
+ if (!run(xdp, bq))
+ return;
+
+build:
+ skb = xdp_build_skb_from_buff(&xdp->base);
+ if (unlikely(!skb)) {
+ libeth_xdp_return_buff_slow(xdp);
+ return;
+ }
+
+ xdp->data = NULL;
+
+ if (unlikely(!populate(skb, xdp, rs))) {
+ napi_consume_skb(skb, true);
+ return;
+ }
+
+ napi_gro_receive(napi, skb);
+}
+
+static inline void libeth_xdp_prep_desc(struct libeth_xdp_buff *xdp,
+ const void *desc)
+{
+ xdp->desc = desc;
+}
+
+/**
+ * libeth_xdp_run_pass - helper to run XDP program and handle the result
+ * @xdp: XDP buffer to process
+ * @bq: XDP Tx bulk to queue ``XDP_TX`` frames
+ * @napi: NAPI to build an skb and pass it up the stack
+ * @ss: onstack libeth RQ stats
+ * @desc: pointer to the HW descriptor for that frame
+ * @run: driver wrapper to run XDP program
+ * @populate: driver callback to populate an skb with the HW descriptor data
+ *
+ * Wrapper around the underscored version when "fill the descriptor metadata"
+ * means just writing the pointer to the HW descriptor as @xdp->desc.
+ */
+#define libeth_xdp_run_pass(xdp, bq, napi, ss, desc, run, populate) \
+ __libeth_xdp_run_pass(xdp, bq, napi, ss, desc, libeth_xdp_prep_desc, \
+ run, populate)
+
+/**
+ * libeth_xdp_finalize_rx - finalize XDPSQ after a NAPI polling loop (non-XSk)
+ * @bq: ``XDP_TX`` frame bulk
+ * @flush: driver callback to flush the bulk
+ * @finalize: driver callback to start sending the frames and run the timer
+ *
+ * Flush the bulk if there are frames left to send, kick the queue and flush
+ * the XDP maps.
+ */
+#define libeth_xdp_finalize_rx(bq, flush, finalize) \
+ __libeth_xdp_finalize_rx(bq, 0, flush, finalize)
+
+static __always_inline void
+__libeth_xdp_finalize_rx(struct libeth_xdp_tx_bulk *bq, u32 flags,
+ bool (*flush_bulk)(struct libeth_xdp_tx_bulk *bq,
+ u32 flags),
+ void (*finalize)(void *xdpsq, bool sent, bool flush))
+{
+ if (bq->act_mask & LIBETH_XDP_TX) {
+ if (bq->count)
+ flush_bulk(bq, flags | LIBETH_XDP_TX_DROP);
+ finalize(bq->xdpsq, true, true);
+ }
+ if (bq->act_mask & LIBETH_XDP_REDIRECT)
+ xdp_do_flush();
+
+ rcu_read_unlock();
+}
+
+/*
+ * Helpers to reduce boilerplate code in drivers.
+ *
+ * Typical driver Rx flow would be (excl. bulk and buff init, frag attach):
+ *
+ * LIBETH_XDP_DEFINE_START();
+ * LIBETH_XDP_DEFINE_FLUSH_TX(static driver_xdp_flush_tx, driver_xdp_tx_prep,
+ * driver_xdp_xmit);
+ * LIBETH_XDP_DEFINE_RUN(static driver_xdp_run, driver_xdp_run_prog,
+ * driver_xdp_flush_tx, driver_populate_skb);
+ * LIBETH_XDP_DEFINE_FINALIZE(static driver_xdp_finalize_rx,
+ * driver_xdp_flush_tx, driver_xdp_finalize_sq);
+ * LIBETH_XDP_DEFINE_END();
+ *
+ * This will build a set of 4 static functions. The compiler is free to decide
+ * whether to inline them.
+ * Then, in the NAPI polling function:
+ *
+ * while (packets < budget) {
+ * // ...
+ * driver_xdp_run(xdp, &bq, napi, &rs, desc);
+ * }
+ * driver_xdp_finalize_rx(&bq);
+ */
+
+#define LIBETH_XDP_DEFINE_START() \
+ __diag_push(); \
+ __diag_ignore(GCC, 8, "-Wold-style-declaration", \
+ "Allow specifying \'static\' after the return type")
+
+/**
+ * LIBETH_XDP_DEFINE_TIMER - define a driver XDPSQ cleanup timer callback
+ * @name: name of the function to define
+ * @poll: Tx polling/completion function
+ */
+#define LIBETH_XDP_DEFINE_TIMER(name, poll) \
+void name(struct work_struct *work) \
+{ \
+ libeth_xdpsq_run_timer(work, poll); \
+}
+
+/**
+ * LIBETH_XDP_DEFINE_FLUSH_TX - define a driver ``XDP_TX`` bulk flush function
+ * @name: name of the function to define
+ * @prep: driver callback to clean an XDPSQ
+ * @xmit: driver callback to write a HW Tx descriptor
+ */
+#define LIBETH_XDP_DEFINE_FLUSH_TX(name, prep, xmit) \
+ __LIBETH_XDP_DEFINE_FLUSH_TX(name, prep, xmit, xdp)
+
+#define __LIBETH_XDP_DEFINE_FLUSH_TX(name, prep, xmit, pfx) \
+bool name(struct libeth_xdp_tx_bulk *bq, u32 flags) \
+{ \
+ return libeth_##pfx##_tx_flush_bulk(bq, flags, prep, xmit); \
+}
+
+/**
+ * LIBETH_XDP_DEFINE_FLUSH_XMIT - define a driver XDP xmit bulk flush function
+ * @name: name of the function to define
+ * @prep: driver callback to clean an XDPSQ
+ * @xmit: driver callback to write a HW Tx descriptor
+ */
+#define LIBETH_XDP_DEFINE_FLUSH_XMIT(name, prep, xmit) \
+bool name(struct libeth_xdp_tx_bulk *bq, u32 flags) \
+{ \
+ return libeth_xdp_xmit_flush_bulk(bq, flags, prep, xmit); \
+}
+
+/**
+ * LIBETH_XDP_DEFINE_RUN_PROG - define a driver XDP program run function
+ * @name: name of the function to define
+ * @flush: driver callback to flush an ``XDP_TX`` bulk
+ */
+#define LIBETH_XDP_DEFINE_RUN_PROG(name, flush) \
+ bool __LIBETH_XDP_DEFINE_RUN_PROG(name, flush, xdp)
+
+#define __LIBETH_XDP_DEFINE_RUN_PROG(name, flush, pfx) \
+name(struct libeth_xdp_buff *xdp, struct libeth_xdp_tx_bulk *bq) \
+{ \
+ return libeth_##pfx##_run_prog(xdp, bq, flush); \
+}
+
+/**
+ * LIBETH_XDP_DEFINE_RUN_PASS - define a driver buffer process + pass function
+ * @name: name of the function to define
+ * @run: driver callback to run XDP program (above)
+ * @populate: driver callback to fill an skb with HW descriptor info
+ */
+#define LIBETH_XDP_DEFINE_RUN_PASS(name, run, populate) \
+ void __LIBETH_XDP_DEFINE_RUN_PASS(name, run, populate, xdp)
+
+#define __LIBETH_XDP_DEFINE_RUN_PASS(name, run, populate, pfx) \
+name(struct libeth_xdp_buff *xdp, struct libeth_xdp_tx_bulk *bq, \
+ struct napi_struct *napi, struct libeth_rq_napi_stats *ss, \
+ const void *desc) \
+{ \
+ return libeth_##pfx##_run_pass(xdp, bq, napi, ss, desc, run, \
+ populate); \
+}
+
+/**
+ * LIBETH_XDP_DEFINE_RUN - define a driver buffer process, run + pass function
+ * @name: name of the function to define
+ * @run: name of the XDP prog run function to define
+ * @flush: driver callback to flush an ``XDP_TX`` bulk
+ * @populate: driver callback to fill an skb with HW descriptor info
+ */
+#define LIBETH_XDP_DEFINE_RUN(name, run, flush, populate) \
+ __LIBETH_XDP_DEFINE_RUN(name, run, flush, populate, XDP)
+
+#define __LIBETH_XDP_DEFINE_RUN(name, run, flush, populate, pfx) \
+ LIBETH_##pfx##_DEFINE_RUN_PROG(static run, flush); \
+ LIBETH_##pfx##_DEFINE_RUN_PASS(name, run, populate)
+
+/**
+ * LIBETH_XDP_DEFINE_FINALIZE - define a driver Rx NAPI poll finalize function
+ * @name: name of the function to define
+ * @flush: driver callback to flush an ``XDP_TX`` bulk
+ * @finalize: driver callback to finalize an XDPSQ and run the timer
+ */
+#define LIBETH_XDP_DEFINE_FINALIZE(name, flush, finalize) \
+ __LIBETH_XDP_DEFINE_FINALIZE(name, flush, finalize, xdp)
+
+#define __LIBETH_XDP_DEFINE_FINALIZE(name, flush, finalize, pfx) \
+void name(struct libeth_xdp_tx_bulk *bq) \
+{ \
+ libeth_##pfx##_finalize_rx(bq, flush, finalize); \
+}
+
+#define LIBETH_XDP_DEFINE_END() __diag_pop()
+
+/* XMO */
+
+/**
+ * libeth_xdp_buff_to_rq - get RQ pointer from an XDP buffer pointer
+ * @xdp: &libeth_xdp_buff corresponding to the queue
+ * @type: typeof() of the driver Rx queue structure
+ * @member: name of &xdp_rxq_info inside @type
+ *
+ * Oftentimes, a pointer to the RQ is needed when reading/filling metadata from
+ * HW descriptors. The helper can be used to quickly jump from an XDP buffer
+ * to the queue corresponding to its &xdp_rxq_info without introducing
+ * additional fields (&libeth_xdp_buff is precisely 1 cacheline long on x64).
+ */
+#define libeth_xdp_buff_to_rq(xdp, type, member) \
+ container_of_const((xdp)->base.rxq, type, member)
+
+/**
+ * libeth_xdpmo_rx_hash - convert &libeth_rx_pt to an XDP RSS hash metadata
+ * @hash: pointer to the variable to write the hash to
+ * @rss_type: pointer to the variable to write the hash type to
+ * @val: hash value from the HW descriptor
+ * @pt: libeth parsed packet type
+ *
+ * Handle zeroed/non-available hash and convert libeth parsed packet type to
+ * the corresponding XDP RSS hash type. To be called at the end of
+ * the driver's xdp_metadata_ops::xmo_rx_hash() implementation.
+ * Note that if the driver doesn't use a constant packet type lookup table but
+ * generates it at runtime, it must call libeth_rx_pt_gen_hash_type(pt) to
+ * generate XDP RSS hash type for each packet type.
+ *
+ * Return: 0 on success, -ENODATA when the hash is not available.
+ */
+static inline int libeth_xdpmo_rx_hash(u32 *hash,
+ enum xdp_rss_hash_type *rss_type,
+ u32 val, struct libeth_rx_pt pt)
+{
+ if (unlikely(!val))
+ return -ENODATA;
+
+ *hash = val;
+ *rss_type = pt.hash_type;
+
+ return 0;
+}
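
As an illustration, a driver ::xmo_rx_hash() callback could combine libeth_xdp_buff_to_rq() and the helper above roughly as follows; the Rx descriptor layout and the packet type lookup table are assumptions.

/* Hypothetical metadata op; drv_rx_desc and rx_ptype_lkup are assumed */
static int drv_xdpmo_rx_hash(const struct xdp_md *ctx, u32 *hash,
			     enum xdp_rss_hash_type *rss_type)
{
	const struct libeth_xdp_buff *xdp = (typeof(xdp))ctx;
	const struct drv_rx_desc *desc = xdp->desc;
	const struct drv_rxq *rxq;

	rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);

	return libeth_xdpmo_rx_hash(hash, rss_type,
				    le32_to_cpu(desc->rss_hash),
				    rxq->rx_ptype_lkup[le16_to_cpu(desc->ptype)]);
}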
+
+/* Tx buffer completion */
+
+void libeth_xdp_return_buff_bulk(const struct skb_shared_info *sinfo,
+ struct xdp_frame_bulk *bq, bool frags);
+void libeth_xsk_buff_free_slow(struct libeth_xdp_buff *xdp);
+
+/**
+ * __libeth_xdp_complete_tx - complete sent XDPSQE
+ * @sqe: SQ element / Tx buffer to complete
+ * @cp: Tx polling/completion params
+ * @bulk: internal callback to bulk-free ``XDP_TX`` buffers
+ * @xsk: internal callback to free XSk ``XDP_TX`` buffers
+ *
+ * Use the non-underscored version in drivers instead. This one is shared
+ * internally with libeth_tx_complete_any().
+ * Complete an XDPSQE of any type of XDP frame. This includes DMA unmapping
+ * when needed, buffer freeing, stats update, and SQE invalidation.
+ */
+static __always_inline void
+__libeth_xdp_complete_tx(struct libeth_sqe *sqe, struct libeth_cq_pp *cp,
+ typeof(libeth_xdp_return_buff_bulk) bulk,
+ typeof(libeth_xsk_buff_free_slow) xsk)
+{
+ enum libeth_sqe_type type = sqe->type;
+
+ switch (type) {
+ case LIBETH_SQE_EMPTY:
+ return;
+ case LIBETH_SQE_XDP_XMIT:
+ case LIBETH_SQE_XDP_XMIT_FRAG:
+ dma_unmap_page(cp->dev, dma_unmap_addr(sqe, dma),
+ dma_unmap_len(sqe, len), DMA_TO_DEVICE);
+ break;
+ default:
+ break;
+ }
+
+ switch (type) {
+ case LIBETH_SQE_XDP_TX:
+ bulk(sqe->sinfo, cp->bq, sqe->nr_frags != 1);
+ break;
+ case LIBETH_SQE_XDP_XMIT:
+ xdp_return_frame_bulk(sqe->xdpf, cp->bq);
+ break;
+ case LIBETH_SQE_XSK_TX:
+ case LIBETH_SQE_XSK_TX_FRAG:
+ xsk(sqe->xsk);
+ break;
+ default:
+ break;
+ }
+
+ switch (type) {
+ case LIBETH_SQE_XDP_TX:
+ case LIBETH_SQE_XDP_XMIT:
+ case LIBETH_SQE_XSK_TX:
+ cp->xdp_tx -= sqe->nr_frags;
+
+ cp->xss->packets++;
+ cp->xss->bytes += sqe->bytes;
+ break;
+ default:
+ break;
+ }
+
+ sqe->type = LIBETH_SQE_EMPTY;
+}
+
+static inline void libeth_xdp_complete_tx(struct libeth_sqe *sqe,
+ struct libeth_cq_pp *cp)
+{
+ __libeth_xdp_complete_tx(sqe, cp, libeth_xdp_return_buff_bulk,
+ libeth_xsk_buff_free_slow);
+}
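
For completeness, a sketch of a completion loop driving the helper above; the &libeth_cq_pp fields follow their usage in __libeth_xdp_complete_tx(), while the queue structure and the ring-walking logic are assumptions.

/* Illustrative XDPSQ cleaning; drv_xdpsq layout is assumed */
static void drv_clean_xdpsq(struct drv_xdpsq *sq, u32 budget)
{
	struct xdp_frame_bulk frame_bq;
	struct libeth_cq_pp cp = {
		.dev	= sq->dev,		/* device used for DMA mapping */
		.bq	= &frame_bq,
		.xss	= &sq->xdp_stats,	/* per-queue XDP Tx stats */
	};

	xdp_frame_bulk_init(&frame_bq);

	while (budget-- && sq->next_to_clean != sq->next_to_use) {
		libeth_xdp_complete_tx(&sq->sqes[sq->next_to_clean], &cp);
		sq->next_to_clean = (sq->next_to_clean + 1) % sq->desc_count;
	}

	xdp_flush_frame_bulk(&frame_bq);
}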
+
+/* Misc */
+
+u32 libeth_xdp_queue_threshold(u32 count);
+
+void __libeth_xdp_set_features(struct net_device *dev,
+ const struct xdp_metadata_ops *xmo,
+ u32 zc_segs,
+ const struct xsk_tx_metadata_ops *tmo);
+void libeth_xdp_set_redirect(struct net_device *dev, bool enable);
+
+/**
+ * libeth_xdp_set_features - set XDP features for netdev
+ * @dev: &net_device to configure
+ * @...: optional params, see __libeth_xdp_set_features()
+ *
+ * Set all the features libeth_xdp supports, including .ndo_xdp_xmit(). That
+ * said, it should be used only when XDPSQs are always available regardless
+ * of whether an XDP prog is attached to @dev.
+ */
+#define libeth_xdp_set_features(dev, ...) \
+ CONCATENATE(__libeth_xdp_feat, \
+ COUNT_ARGS(__VA_ARGS__))(dev, ##__VA_ARGS__)
+
+#define __libeth_xdp_feat0(dev) \
+ __libeth_xdp_set_features(dev, NULL, 0, NULL)
+#define __libeth_xdp_feat1(dev, xmo) \
+ __libeth_xdp_set_features(dev, xmo, 0, NULL)
+#define __libeth_xdp_feat2(dev, xmo, zc_segs) \
+ __libeth_xdp_set_features(dev, xmo, zc_segs, NULL)
+#define __libeth_xdp_feat3(dev, xmo, zc_segs, tmo) \
+ __libeth_xdp_set_features(dev, xmo, zc_segs, tmo)
+
+/**
+ * libeth_xdp_set_features_noredir - enable all libeth_xdp features w/o redir
+ * @dev: target &net_device
+ * @...: optional params, see __libeth_xdp_set_features()
+ *
+ * Enable everything except the .ndo_xdp_xmit() feature, use when XDPSQs are
+ * not available right after netdev registration.
+ */
+#define libeth_xdp_set_features_noredir(dev, ...) \
+ __libeth_xdp_set_features_noredir(dev, __UNIQUE_ID(dev_), \
+ ##__VA_ARGS__)
+
+#define __libeth_xdp_set_features_noredir(dev, ud, ...) do { \
+ struct net_device *ud = (dev); \
+ \
+ libeth_xdp_set_features(ud, ##__VA_ARGS__); \
+ libeth_xdp_set_redirect(ud, false); \
+} while (0)
+
+#define libeth_xsktmo ((const void *)GOLDEN_RATIO_PRIME)
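
The setters above are normally called once at netdev registration; a hedged example follows, where the metadata ops instance and the zero-copy segment count are arbitrary.

/* Assumed driver metadata ops; 4 ZC segments is just an example value */
static const struct xdp_metadata_ops drv_xdpmo = {
	.xmo_rx_hash	= drv_xdpmo_rx_hash,
};

static void drv_set_xdp_features(struct net_device *dev)
{
	/* XDPSQs are created only on prog attach here, hence no redirect yet */
	libeth_xdp_set_features_noredir(dev, &drv_xdpmo, 4, libeth_xsktmo);
}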
+
+#endif /* __LIBETH_XDP_H */
diff --git a/include/net/libeth/xsk.h b/include/net/libeth/xsk.h
new file mode 100644
index 000000000000..481a7b28e6f2
--- /dev/null
+++ b/include/net/libeth/xsk.h
@@ -0,0 +1,685 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2025 Intel Corporation */
+
+#ifndef __LIBETH_XSK_H
+#define __LIBETH_XSK_H
+
+#include <net/libeth/xdp.h>
+#include <net/xdp_sock_drv.h>
+
+/* ``XDP_TXMD_FLAGS_VALID`` is defined only under ``CONFIG_XDP_SOCKETS`` */
+#ifdef XDP_TXMD_FLAGS_VALID
+static_assert(XDP_TXMD_FLAGS_VALID <= LIBETH_XDP_TX_XSKMD);
+#endif
+
+/* ``XDP_TX`` bulking */
+
+/**
+ * libeth_xsk_tx_queue_head - internal helper for queueing XSk ``XDP_TX`` head
+ * @bq: XDP Tx bulk to queue the head frag to
+ * @xdp: XSk buffer with the head to queue
+ *
+ * Return: false if it's the only frag of the frame, true if it's an S/G frame.
+ */
+static inline bool libeth_xsk_tx_queue_head(struct libeth_xdp_tx_bulk *bq,
+ struct libeth_xdp_buff *xdp)
+{
+ bq->bulk[bq->count++] = (typeof(*bq->bulk)){
+ .xsk = xdp,
+ __libeth_xdp_tx_len(xdp->base.data_end - xdp->data,
+ LIBETH_XDP_TX_FIRST),
+ };
+
+ if (likely(!xdp_buff_has_frags(&xdp->base)))
+ return false;
+
+ bq->bulk[bq->count - 1].flags |= LIBETH_XDP_TX_MULTI;
+
+ return true;
+}
+
+/**
+ * libeth_xsk_tx_queue_frag - internal helper for queueing XSk ``XDP_TX`` frag
+ * @bq: XDP Tx bulk to queue the frag to
+ * @frag: XSk frag to queue
+ */
+static inline void libeth_xsk_tx_queue_frag(struct libeth_xdp_tx_bulk *bq,
+ struct libeth_xdp_buff *frag)
+{
+ bq->bulk[bq->count++] = (typeof(*bq->bulk)){
+ .xsk = frag,
+ __libeth_xdp_tx_len(frag->base.data_end - frag->data),
+ };
+}
+
+/**
+ * libeth_xsk_tx_queue_bulk - internal helper for queueing XSk ``XDP_TX`` frame
+ * @bq: XDP Tx bulk to queue the frame to
+ * @xdp: XSk buffer to queue
+ * @flush_bulk: driver callback to flush the bulk to the HW queue
+ *
+ * Return: true on success, false on flush error.
+ */
+static __always_inline bool
+libeth_xsk_tx_queue_bulk(struct libeth_xdp_tx_bulk *bq,
+ struct libeth_xdp_buff *xdp,
+ bool (*flush_bulk)(struct libeth_xdp_tx_bulk *bq,
+ u32 flags))
+{
+ bool ret = true;
+
+ if (unlikely(bq->count == LIBETH_XDP_TX_BULK) &&
+ unlikely(!flush_bulk(bq, LIBETH_XDP_TX_XSK))) {
+ libeth_xsk_buff_free_slow(xdp);
+ return false;
+ }
+
+ if (!libeth_xsk_tx_queue_head(bq, xdp))
+ goto out;
+
+ for (const struct libeth_xdp_buff *head = xdp; ; ) {
+ xdp = container_of(xsk_buff_get_frag(&head->base),
+ typeof(*xdp), base);
+ if (!xdp)
+ break;
+
+ if (unlikely(bq->count == LIBETH_XDP_TX_BULK) &&
+ unlikely(!flush_bulk(bq, LIBETH_XDP_TX_XSK))) {
+ ret = false;
+ break;
+ }
+
+ libeth_xsk_tx_queue_frag(bq, xdp);
+ }
+
+out:
+ bq->bulk[bq->count - 1].flags |= LIBETH_XDP_TX_LAST;
+
+ return ret;
+}
+
+/**
+ * libeth_xsk_tx_fill_buf - internal helper to fill XSk ``XDP_TX`` &libeth_sqe
+ * @frm: XDP Tx frame from the bulk
+ * @i: index on the HW queue
+ * @sq: XDPSQ abstraction for the queue
+ * @priv: private data
+ *
+ * Return: XDP Tx descriptor with the synced DMA and other info to pass to
+ * the driver callback.
+ */
+static inline struct libeth_xdp_tx_desc
+libeth_xsk_tx_fill_buf(struct libeth_xdp_tx_frame frm, u32 i,
+ const struct libeth_xdpsq *sq, u64 priv)
+{
+ struct libeth_xdp_buff *xdp = frm.xsk;
+ struct libeth_xdp_tx_desc desc = {
+ .addr = xsk_buff_xdp_get_dma(&xdp->base),
+ .opts = frm.opts,
+ };
+ struct libeth_sqe *sqe;
+
+ xsk_buff_raw_dma_sync_for_device(sq->pool, desc.addr, desc.len);
+
+ sqe = &sq->sqes[i];
+ sqe->xsk = xdp;
+
+ if (!(desc.flags & LIBETH_XDP_TX_FIRST)) {
+ sqe->type = LIBETH_SQE_XSK_TX_FRAG;
+ return desc;
+ }
+
+ sqe->type = LIBETH_SQE_XSK_TX;
+ libeth_xdp_tx_fill_stats(sqe, &desc,
+ xdp_get_shared_info_from_buff(&xdp->base));
+
+ return desc;
+}
+
+/**
+ * libeth_xsk_tx_flush_bulk - wrapper to define flush of XSk ``XDP_TX`` bulk
+ * @bq: bulk to flush
+ * @flags: Tx flags, see __libeth_xdp_tx_flush_bulk()
+ * @prep: driver callback to prepare the queue
+ * @xmit: driver callback to fill a HW descriptor
+ *
+ * Use via LIBETH_XSK_DEFINE_FLUSH_TX() to define an XSk ``XDP_TX`` driver
+ * callback.
+ */
+#define libeth_xsk_tx_flush_bulk(bq, flags, prep, xmit) \
+ __libeth_xdp_tx_flush_bulk(bq, (flags) | LIBETH_XDP_TX_XSK, prep, \
+ libeth_xsk_tx_fill_buf, xmit)
+
+/* XSk TMO */
+
+/**
+ * libeth_xsktmo_req_csum - XSk Tx metadata op to request checksum offload
+ * @csum_start: unused
+ * @csum_offset: unused
+ * @priv: &libeth_xdp_tx_desc from the filling helper
+ *
+ * Generic implementation of ::tmo_request_checksum. Works only when HW doesn't
+ * require filling checksum offsets and other parameters besides the checksum
+ * request bit.
+ * Consider using within @libeth_xsktmo unless the driver requires HW-specific
+ * callbacks.
+ */
+static inline void libeth_xsktmo_req_csum(u16 csum_start, u16 csum_offset,
+ void *priv)
+{
+ ((struct libeth_xdp_tx_desc *)priv)->flags |= LIBETH_XDP_TX_CSUM;
+}
+
+/* Only to inline the callbacks below, use @libeth_xsktmo in drivers instead */
+static const struct xsk_tx_metadata_ops __libeth_xsktmo = {
+ .tmo_request_checksum = libeth_xsktmo_req_csum,
+};
+
+/**
+ * __libeth_xsk_xmit_fill_buf_md - internal helper to prepare XSk xmit w/meta
+ * @xdesc: &xdp_desc from the XSk buffer pool
+ * @sq: XDPSQ abstraction for the queue
+ * @priv: XSk Tx metadata ops
+ *
+ * Same as __libeth_xsk_xmit_fill_buf(), but requests metadata pointer and
+ * fills additional fields in &libeth_xdp_tx_desc to ask for metadata offload.
+ *
+ * Return: XDP Tx descriptor with the DMA, metadata request bits, and other
+ * info to pass to the driver callback.
+ */
+static __always_inline struct libeth_xdp_tx_desc
+__libeth_xsk_xmit_fill_buf_md(const struct xdp_desc *xdesc,
+ const struct libeth_xdpsq *sq,
+ u64 priv)
+{
+ const struct xsk_tx_metadata_ops *tmo = libeth_xdp_priv_to_ptr(priv);
+ struct libeth_xdp_tx_desc desc;
+ struct xdp_desc_ctx ctx;
+
+ ctx = xsk_buff_raw_get_ctx(sq->pool, xdesc->addr);
+ desc = (typeof(desc)){
+ .addr = ctx.dma,
+ __libeth_xdp_tx_len(xdesc->len),
+ };
+
+ BUILD_BUG_ON(!__builtin_constant_p(tmo == libeth_xsktmo));
+ tmo = tmo == libeth_xsktmo ? &__libeth_xsktmo : tmo;
+
+ xsk_tx_metadata_request(ctx.meta, tmo, &desc);
+
+ return desc;
+}
+
+/* XSk xmit implementation */
+
+/**
+ * __libeth_xsk_xmit_fill_buf - internal helper to prepare XSk xmit w/o meta
+ * @xdesc: &xdp_desc from the XSk buffer pool
+ * @sq: XDPSQ abstraction for the queue
+ *
+ * Return: XDP Tx descriptor with the DMA and other info to pass to
+ * the driver callback.
+ */
+static inline struct libeth_xdp_tx_desc
+__libeth_xsk_xmit_fill_buf(const struct xdp_desc *xdesc,
+ const struct libeth_xdpsq *sq)
+{
+ return (struct libeth_xdp_tx_desc){
+ .addr = xsk_buff_raw_get_dma(sq->pool, xdesc->addr),
+ __libeth_xdp_tx_len(xdesc->len),
+ };
+}
+
+/**
+ * libeth_xsk_xmit_fill_buf - internal helper to prepare an XSk xmit
+ * @frm: &xdp_desc from the XSk buffer pool
+ * @i: index on the HW queue
+ * @sq: XDPSQ abstraction for the queue
+ * @priv: XSk Tx metadata ops
+ *
+ * Depending on the metadata ops presence (determined at compile time), calls
+ * the quickest helper to build a libeth XDP Tx descriptor.
+ *
+ * Return: XDP Tx descriptor with the synced DMA, metadata request bits,
+ * and other info to pass to the driver callback.
+ */
+static __always_inline struct libeth_xdp_tx_desc
+libeth_xsk_xmit_fill_buf(struct libeth_xdp_tx_frame frm, u32 i,
+ const struct libeth_xdpsq *sq, u64 priv)
+{
+ struct libeth_xdp_tx_desc desc;
+
+ if (priv)
+ desc = __libeth_xsk_xmit_fill_buf_md(&frm.desc, sq, priv);
+ else
+ desc = __libeth_xsk_xmit_fill_buf(&frm.desc, sq);
+
+ desc.flags |= xsk_is_eop_desc(&frm.desc) ? LIBETH_XDP_TX_LAST : 0;
+
+ xsk_buff_raw_dma_sync_for_device(sq->pool, desc.addr, desc.len);
+
+ return desc;
+}
+
+/**
+ * libeth_xsk_xmit_do_bulk - send XSk xmit frames
+ * @pool: XSk buffer pool containing the frames to send
+ * @xdpsq: opaque pointer to driver's XDPSQ struct
+ * @budget: maximum number of frames that can be sent
+ * @tmo: optional XSk Tx metadata ops
+ * @prep: driver callback to build a &libeth_xdpsq
+ * @xmit: driver callback to put frames to a HW queue
+ * @finalize: driver callback to start a transmission
+ *
+ * Implements generic XSk xmit. Always turns on XSk Tx wakeup as it's assumed
+ * lazy cleaning is used and interrupts are disabled for the queue.
+ * HW descriptor filling is unrolled by ``LIBETH_XDP_TX_BATCH`` to optimize
+ * writes.
+ * Note that unlike other XDP Tx ops, the queue must be locked and cleaned
+ * prior to calling this function, so that the available @budget is already
+ * known. @prep must only build a &libeth_xdpsq and return ``U32_MAX``.
+ *
+ * Return: false if @budget was exhausted, true otherwise.
+ */
+static __always_inline bool
+libeth_xsk_xmit_do_bulk(struct xsk_buff_pool *pool, void *xdpsq, u32 budget,
+ const struct xsk_tx_metadata_ops *tmo,
+ u32 (*prep)(void *xdpsq, struct libeth_xdpsq *sq),
+ void (*xmit)(struct libeth_xdp_tx_desc desc, u32 i,
+ const struct libeth_xdpsq *sq, u64 priv),
+ void (*finalize)(void *xdpsq, bool sent, bool flush))
+{
+ const struct libeth_xdp_tx_frame *bulk;
+ bool wake;
+ u32 n;
+
+ wake = xsk_uses_need_wakeup(pool);
+ if (wake)
+ xsk_clear_tx_need_wakeup(pool);
+
+ n = xsk_tx_peek_release_desc_batch(pool, budget);
+ bulk = container_of(&pool->tx_descs[0], typeof(*bulk), desc);
+
+ libeth_xdp_tx_xmit_bulk(bulk, xdpsq, n, true,
+ libeth_xdp_ptr_to_priv(tmo), prep,
+ libeth_xsk_xmit_fill_buf, xmit);
+ finalize(xdpsq, n, true);
+
+ if (wake)
+ xsk_set_tx_need_wakeup(pool);
+
+ return n < budget;
+}
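
A hedged sketch of the driver-side entry point: the SQ is already locked and cleaned by the caller so the budget is known, and the three callbacks are the ones described above (all drv_* names are assumptions).

/* Illustrative XSk transmit path; drv_* helpers are assumed */
static bool drv_xsk_xmit(struct drv_xdpsq *sq)
{
	/* the SQ lock is held and completion has already run */
	u32 budget = drv_xdpsq_free_count(sq);

	return libeth_xsk_xmit_do_bulk(sq->pool, sq, budget, libeth_xsktmo,
				       drv_xdpsq_prep, drv_xdp_xmit_desc,
				       drv_xdp_finalize_sq);
}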
+
+/* Rx polling path */
+
+/**
+ * libeth_xsk_tx_init_bulk - initialize XDP Tx bulk for an XSk Rx NAPI poll
+ * @bq: bulk to initialize
+ * @prog: RCU pointer to the XDP program (never %NULL)
+ * @dev: target &net_device
+ * @xdpsqs: array of driver XDPSQ structs
+ * @num: number of active XDPSQs, the above array length
+ *
+ * Should be called on an onstack XDP Tx bulk before the XSk NAPI polling loop.
+ * Initializes all the needed fields to run libeth_xdp functions.
+ * Never checks if @prog is %NULL or @num == 0 as XDP must always be enabled
+ * when hitting this path.
+ */
+#define libeth_xsk_tx_init_bulk(bq, prog, dev, xdpsqs, num) \
+ __libeth_xdp_tx_init_bulk(bq, prog, dev, xdpsqs, num, true, \
+ __UNIQUE_ID(bq_), __UNIQUE_ID(nqs_))
+
+struct libeth_xdp_buff *libeth_xsk_buff_add_frag(struct libeth_xdp_buff *head,
+ struct libeth_xdp_buff *xdp);
+
+/**
+ * libeth_xsk_process_buff - attach XSk Rx buffer to &libeth_xdp_buff
+ * @head: head XSk buffer to attach the XSk buffer to (or %NULL)
+ * @xdp: XSk buffer to process
+ * @len: received data length from the descriptor
+ *
+ * If @head == %NULL, treats the XSk buffer as head and initializes
+ * the required fields. Otherwise, attaches the buffer as a frag.
+ * Already performs DMA sync-for-CPU and frame start prefetch
+ * (for head buffers only).
+ *
+ * Return: head XSk buffer on success or if the descriptor must be skipped
+ * (empty), %NULL if there is no space for a new frag.
+ */
+static inline struct libeth_xdp_buff *
+libeth_xsk_process_buff(struct libeth_xdp_buff *head,
+ struct libeth_xdp_buff *xdp, u32 len)
+{
+ if (unlikely(!len)) {
+ libeth_xsk_buff_free_slow(xdp);
+ return head;
+ }
+
+ xsk_buff_set_size(&xdp->base, len);
+ xsk_buff_dma_sync_for_cpu(&xdp->base);
+
+ if (head)
+ return libeth_xsk_buff_add_frag(head, xdp);
+
+ prefetch(xdp->data);
+
+ return xdp;
+}
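
An XSk counterpart of the Rx step sketched earlier might look like the fragment below; the descriptor layout, the EOP test, where the head pointer is kept and the drv_xsk_run() wrapper (from LIBETH_XSK_DEFINE_RUN() below) are all assumptions.

/* One XSk Rx descriptor worth of work; drv_* names are illustrative */
static bool drv_xsk_process_rx_desc(struct drv_rxq *rxq,
				    const struct drv_rx_desc *desc,
				    struct libeth_xdp_tx_bulk *bq,
				    struct libeth_rq_napi_stats *rs)
{
	struct libeth_xdp_buff *xdp = rxq->xsk_fqes[rxq->next_to_clean];
	u32 len = le16_to_cpu(desc->pkt_len);

	rxq->xsk_head = libeth_xsk_process_buff(rxq->xsk_head, xdp, len);
	if (!rxq->xsk_head)
		return false;		/* no space for a new frag */

	if (!drv_rx_desc_is_eop(desc))
		return true;		/* keep collecting frags */

	if (!drv_xsk_run(rxq->xsk_head, bq, rxq->napi, rs, desc))
		return false;		/* out of free buffers, stop polling */

	rxq->xsk_head = NULL;

	return true;
}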
+
+void libeth_xsk_buff_stats_frags(struct libeth_rq_napi_stats *rs,
+ const struct libeth_xdp_buff *xdp);
+
+u32 __libeth_xsk_run_prog_slow(struct libeth_xdp_buff *xdp,
+ const struct libeth_xdp_tx_bulk *bq,
+ enum xdp_action act, int ret);
+
+/**
+ * __libeth_xsk_run_prog - run XDP program on XSk buffer
+ * @xdp: XSk buffer to run the prog on
+ * @bq: buffer bulk for ``XDP_TX`` queueing
+ *
+ * Internal inline abstraction to run XDP program on XSk Rx path. Handles
+ * only the most common verdict, ``XDP_REDIRECT``, inline; the rest is processed
+ * externally.
+ * Reports an XDP prog exception on errors.
+ *
+ * Return: libeth_xdp prog verdict depending on the prog's verdict.
+ */
+static __always_inline u32
+__libeth_xsk_run_prog(struct libeth_xdp_buff *xdp,
+ const struct libeth_xdp_tx_bulk *bq)
+{
+ enum xdp_action act;
+ int ret = 0;
+
+ act = bpf_prog_run_xdp(bq->prog, &xdp->base);
+ if (unlikely(act != XDP_REDIRECT))
+rest:
+ return __libeth_xsk_run_prog_slow(xdp, bq, act, ret);
+
+ ret = xdp_do_redirect(bq->dev, &xdp->base, bq->prog);
+ if (unlikely(ret))
+ goto rest;
+
+ return LIBETH_XDP_REDIRECT;
+}
+
+/**
+ * libeth_xsk_run_prog - run XDP program on XSk path and handle all verdicts
+ * @xdp: XSk buffer to process
+ * @bq: XDP Tx bulk to queue ``XDP_TX`` buffers
+ * @fl: driver ``XDP_TX`` bulk flush callback
+ *
+ * Run the attached XDP program and handle all possible verdicts.
+ * Prefer using it via LIBETH_XSK_DEFINE_RUN{,_PASS,_PROG}().
+ *
+ * Return: libeth_xdp prog verdict depending on the prog's verdict.
+ */
+#define libeth_xsk_run_prog(xdp, bq, fl) \
+ __libeth_xdp_run_flush(xdp, bq, __libeth_xsk_run_prog, \
+ libeth_xsk_tx_queue_bulk, fl)
+
+/**
+ * __libeth_xsk_run_pass - helper to run XDP program and handle the result
+ * @xdp: XSk buffer to process
+ * @bq: XDP Tx bulk to queue ``XDP_TX`` frames
+ * @napi: NAPI to build an skb and pass it up the stack
+ * @rs: onstack libeth RQ stats
+ * @md: metadata that should be filled to the XSk buffer
+ * @prep: callback for filling the metadata
+ * @run: driver wrapper to run XDP program
+ * @populate: driver callback to populate an skb with the HW descriptor data
+ *
+ * Inline abstraction, XSk's counterpart of __libeth_xdp_run_pass(), see its
+ * doc for details.
+ *
+ * Return: false if the polling loop must be exited due to lack of free
+ * buffers, true otherwise.
+ */
+static __always_inline bool
+__libeth_xsk_run_pass(struct libeth_xdp_buff *xdp,
+ struct libeth_xdp_tx_bulk *bq, struct napi_struct *napi,
+ struct libeth_rq_napi_stats *rs, const void *md,
+ void (*prep)(struct libeth_xdp_buff *xdp,
+ const void *md),
+ u32 (*run)(struct libeth_xdp_buff *xdp,
+ struct libeth_xdp_tx_bulk *bq),
+ bool (*populate)(struct sk_buff *skb,
+ const struct libeth_xdp_buff *xdp,
+ struct libeth_rq_napi_stats *rs))
+{
+ struct sk_buff *skb;
+ u32 act;
+
+ rs->bytes += xdp->base.data_end - xdp->data;
+ rs->packets++;
+
+ if (unlikely(xdp_buff_has_frags(&xdp->base)))
+ libeth_xsk_buff_stats_frags(rs, xdp);
+
+ if (prep && (!__builtin_constant_p(!!md) || md))
+ prep(xdp, md);
+
+ act = run(xdp, bq);
+ if (likely(act == LIBETH_XDP_REDIRECT))
+ return true;
+
+ if (act != LIBETH_XDP_PASS)
+ return act != LIBETH_XDP_ABORTED;
+
+ skb = xdp_build_skb_from_zc(&xdp->base);
+ if (unlikely(!skb)) {
+ libeth_xsk_buff_free_slow(xdp);
+ return true;
+ }
+
+ if (unlikely(!populate(skb, xdp, rs))) {
+ napi_consume_skb(skb, true);
+ return true;
+ }
+
+ napi_gro_receive(napi, skb);
+
+ return true;
+}
+
+/**
+ * libeth_xsk_run_pass - helper to run XDP program and handle the result
+ * @xdp: XSk buffer to process
+ * @bq: XDP Tx bulk to queue ``XDP_TX`` frames
+ * @napi: NAPI to build an skb and pass it up the stack
+ * @rs: onstack libeth RQ stats
+ * @desc: pointer to the HW descriptor for that frame
+ * @run: driver wrapper to run XDP program
+ * @populate: driver callback to populate an skb with the HW descriptor data
+ *
+ * Wrapper around the underscored version when "fill the descriptor metadata"
+ * means just writing the pointer to the HW descriptor as @xdp->desc.
+ */
+#define libeth_xsk_run_pass(xdp, bq, napi, rs, desc, run, populate) \
+ __libeth_xsk_run_pass(xdp, bq, napi, rs, desc, libeth_xdp_prep_desc, \
+ run, populate)
+
+/**
+ * libeth_xsk_finalize_rx - finalize XDPSQ after an XSk NAPI polling loop
+ * @bq: ``XDP_TX`` frame bulk
+ * @flush: driver callback to flush the bulk
+ * @finalize: driver callback to start sending the frames and run the timer
+ *
+ * Flush the bulk if there are frames left to send, kick the queue and flush
+ * the XDP maps.
+ */
+#define libeth_xsk_finalize_rx(bq, flush, finalize) \
+ __libeth_xdp_finalize_rx(bq, LIBETH_XDP_TX_XSK, flush, finalize)
+
+/*
+ * Helpers to reduce boilerplate code in drivers.
+ *
+ * Typical driver XSk Rx flow would be (excl. bulk and buff init, frag attach):
+ *
+ * LIBETH_XDP_DEFINE_START();
+ * LIBETH_XSK_DEFINE_FLUSH_TX(static driver_xsk_flush_tx, driver_xsk_tx_prep,
+ * driver_xdp_xmit);
+ * LIBETH_XSK_DEFINE_RUN(static driver_xsk_run, driver_xsk_run_prog,
+ * driver_xsk_flush_tx, driver_populate_skb);
+ * LIBETH_XSK_DEFINE_FINALIZE(static driver_xsk_finalize_rx,
+ * driver_xsk_flush_tx, driver_xdp_finalize_sq);
+ * LIBETH_XDP_DEFINE_END();
+ *
+ * This will build a set of 4 static functions. The compiler is free to decide
+ * whether to inline them.
+ * Then, in the NAPI polling function:
+ *
+ * while (packets < budget) {
+ * // ...
+ * if (!driver_xsk_run(xdp, &bq, napi, &rs, desc))
+ * break;
+ * }
+ * driver_xsk_finalize_rx(&bq);
+ */
+
+/**
+ * LIBETH_XSK_DEFINE_FLUSH_TX - define a driver XSk ``XDP_TX`` flush function
+ * @name: name of the function to define
+ * @prep: driver callback to clean an XDPSQ
+ * @xmit: driver callback to write a HW Tx descriptor
+ */
+#define LIBETH_XSK_DEFINE_FLUSH_TX(name, prep, xmit) \
+ __LIBETH_XDP_DEFINE_FLUSH_TX(name, prep, xmit, xsk)
+
+/**
+ * LIBETH_XSK_DEFINE_RUN_PROG - define a driver XDP program run function
+ * @name: name of the function to define
+ * @flush: driver callback to flush an XSk ``XDP_TX`` bulk
+ */
+#define LIBETH_XSK_DEFINE_RUN_PROG(name, flush) \
+ u32 __LIBETH_XDP_DEFINE_RUN_PROG(name, flush, xsk)
+
+/**
+ * LIBETH_XSK_DEFINE_RUN_PASS - define a driver buffer process + pass function
+ * @name: name of the function to define
+ * @run: driver callback to run XDP program (above)
+ * @populate: driver callback to fill an skb with HW descriptor info
+ */
+#define LIBETH_XSK_DEFINE_RUN_PASS(name, run, populate) \
+ bool __LIBETH_XDP_DEFINE_RUN_PASS(name, run, populate, xsk)
+
+/**
+ * LIBETH_XSK_DEFINE_RUN - define a driver buffer process, run + pass function
+ * @name: name of the function to define
+ * @run: name of the XDP prog run function to define
+ * @flush: driver callback to flush an XSk ``XDP_TX`` bulk
+ * @populate: driver callback to fill an skb with HW descriptor info
+ */
+#define LIBETH_XSK_DEFINE_RUN(name, run, flush, populate) \
+ __LIBETH_XDP_DEFINE_RUN(name, run, flush, populate, XSK)
+
+/**
+ * LIBETH_XSK_DEFINE_FINALIZE - define a driver XSk NAPI poll finalize function
+ * @name: name of the function to define
+ * @flush: driver callback to flush an XSk ``XDP_TX`` bulk
+ * @finalize: driver callback to finalize an XDPSQ and run the timer
+ */
+#define LIBETH_XSK_DEFINE_FINALIZE(name, flush, finalize) \
+ __LIBETH_XDP_DEFINE_FINALIZE(name, flush, finalize, xsk)
+
+/* Refilling */
+
+/**
+ * struct libeth_xskfq - structure representing an XSk buffer (fill) queue
+ * @fp: hotpath part of the structure
+ * @pool: &xsk_buff_pool for buffer management
+ * @fqes: array of XSk buffer pointers
+ * @descs: opaque pointer to the HW descriptor array
+ * @ntu: index of the next buffer to poll
+ * @count: number of descriptors/buffers the queue has
+ * @pending: current number of XSkFQEs to refill
+ * @thresh: threshold below which the queue is refilled
+ * @buf_len: HW-writeable length per buffer
+ * @nid: ID of the closest NUMA node with memory
+ */
+struct libeth_xskfq {
+ struct_group_tagged(libeth_xskfq_fp, fp,
+ struct xsk_buff_pool *pool;
+ struct libeth_xdp_buff **fqes;
+ void *descs;
+
+ u32 ntu;
+ u32 count;
+ );
+
+ /* Cold fields */
+ u32 pending;
+ u32 thresh;
+
+ u32 buf_len;
+ int nid;
+};
+
+int libeth_xskfq_create(struct libeth_xskfq *fq);
+void libeth_xskfq_destroy(struct libeth_xskfq *fq);
+
+/**
+ * libeth_xsk_buff_xdp_get_dma - get DMA address of XSk &libeth_xdp_buff
+ * @xdp: buffer to get the DMA addr for
+ */
+#define libeth_xsk_buff_xdp_get_dma(xdp) \
+ xsk_buff_xdp_get_dma(&(xdp)->base)
+
+/**
+ * libeth_xskfqe_alloc - allocate @n XSk Rx buffers
+ * @fq: hotpath part of the XSkFQ, usually onstack
+ * @n: number of buffers to allocate
+ * @fill: driver callback to write DMA addresses to HW descriptors
+ *
+ * Note that @fq->ntu gets updated, but ::pending must be recalculated
+ * by the caller.
+ *
+ * Return: number of buffers refilled.
+ */
+static __always_inline u32
+libeth_xskfqe_alloc(struct libeth_xskfq_fp *fq, u32 n,
+ void (*fill)(const struct libeth_xskfq_fp *fq, u32 i))
+{
+ u32 this, ret, done = 0;
+ struct xdp_buff **xskb;
+
+ this = fq->count - fq->ntu;
+ if (likely(this > n))
+ this = n;
+
+again:
+ xskb = (typeof(xskb))&fq->fqes[fq->ntu];
+ ret = xsk_buff_alloc_batch(fq->pool, xskb, this);
+
+ for (u32 i = 0, ntu = fq->ntu; likely(i < ret); i++)
+ fill(fq, ntu + i);
+
+ done += ret;
+ fq->ntu += ret;
+
+ if (likely(fq->ntu < fq->count) || unlikely(ret < this))
+ goto out;
+
+ fq->ntu = 0;
+
+ if (this < n) {
+ this = n - this;
+ goto again;
+ }
+
+out:
+ return done;
+}
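
To illustrate, a fill callback and a refill wrapper might look as follows; the HW Rx descriptor layout and the refill policy are assumptions, and note how ::pending is recalculated by the caller as required above.

/* Hypothetical fill callback: write one buffer's DMA address to the ring */
static void drv_xsk_fqe_fill(const struct libeth_xskfq_fp *fq, u32 i)
{
	struct drv_rx_desc *descs = fq->descs;

	descs[i].addr = cpu_to_le64(libeth_xsk_buff_xdp_get_dma(fq->fqes[i]));
}

/* Refill policy is driver-specific; this simply tops the queue up */
static void drv_xsk_refill(struct libeth_xskfq *fq)
{
	if (fq->pending < fq->thresh)
		return;

	fq->pending -= libeth_xskfqe_alloc(&fq->fp, fq->pending,
					   drv_xsk_fqe_fill);
}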
+
+/* .ndo_xsk_wakeup */
+
+void libeth_xsk_init_wakeup(call_single_data_t *csd, struct napi_struct *napi);
+void libeth_xsk_wakeup(call_single_data_t *csd, u32 qid);
+
+/* Pool setup */
+
+int libeth_xsk_setup_pool(struct net_device *dev, u32 qid, bool enable);
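
Pool setup is usually wired straight into the driver's .ndo_bpf(); a minimal hedged fragment, with all other commands and error handling omitted:

/* Sketch of .ndo_bpf() handling only the XSk pool command */
static int drv_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case XDP_SETUP_XSK_POOL:
		return libeth_xsk_setup_pool(dev, bpf->xsk.queue_id,
					     !!bpf->xsk.pool);
	default:
		return -EOPNOTSUPP;
	}
}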
+
+#endif /* __LIBETH_XSK_H */
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index c306ebe379a0..26232f603e33 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -138,12 +138,12 @@ int bpf_lwt_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len,
static inline void lwtunnel_set_redirect(struct dst_entry *dst)
{
if (lwtunnel_output_redirect(dst->lwtstate)) {
- dst->lwtstate->orig_output = dst->output;
- dst->output = lwtunnel_output;
+ dst->lwtstate->orig_output = READ_ONCE(dst->output);
+ WRITE_ONCE(dst->output, lwtunnel_output);
}
if (lwtunnel_input_redirect(dst->lwtstate)) {
- dst->lwtstate->orig_input = dst->input;
- dst->input = lwtunnel_input;
+ dst->lwtstate->orig_input = READ_ONCE(dst->input);
+ WRITE_ONCE(dst->input, lwtunnel_input);
}
}
#else
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 82617579d910..a45e4bee65d4 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -758,6 +758,8 @@ struct ieee80211_parsed_tpe {
* be updated to 1, even if bss_param_ch_cnt didn't change. This allows
* the link to know that it heard the latest value from its own beacon
* (as opposed to hearing its value from another link's beacon).
+ * @s1g_long_beacon_period: number of beacon intervals between each long
+ * beacon transmission.
*/
struct ieee80211_bss_conf {
struct ieee80211_vif *vif;
@@ -857,6 +859,8 @@ struct ieee80211_bss_conf {
u8 bss_param_ch_cnt;
u8 bss_param_ch_cnt_link_id;
+
+ u8 s1g_long_beacon_period;
};
/**
@@ -2428,6 +2432,7 @@ struct ieee80211_sta_aggregates {
* @he_cap: HE capabilities of this STA
* @he_6ghz_capa: on 6 GHz, holds the HE 6 GHz band capabilities
* @eht_cap: EHT capabilities of this STA
+ * @s1g_cap: S1G capabilities of this STA
* @agg: per-link data for multi-link aggregation
* @bandwidth: current bandwidth the station can receive with
* @rx_nss: in HT/VHT, the maximum number of spatial streams the
@@ -2450,6 +2455,7 @@ struct ieee80211_link_sta {
struct ieee80211_sta_he_cap he_cap;
struct ieee80211_he_6ghz_capa he_6ghz_capa;
struct ieee80211_sta_eht_cap eht_cap;
+ struct ieee80211_sta_s1g_cap s1g_cap;
struct ieee80211_sta_aggregates agg;
@@ -2850,8 +2856,6 @@ struct ieee80211_txq {
*
* @IEEE80211_HW_DISALLOW_PUNCTURING: HW requires disabling puncturing in EHT
* and connecting with a lower bandwidth instead
- * @IEEE80211_HW_DISALLOW_PUNCTURING_5GHZ: HW requires disabling puncturing in
- * EHT in 5 GHz and connecting with a lower bandwidth instead
*
* @IEEE80211_HW_HANDLES_QUIET_CSA: HW/driver handles quieting for CSA, so
* no need to stop queues. This really should be set by a driver that
@@ -2921,7 +2925,6 @@ enum ieee80211_hw_flags {
IEEE80211_HW_DETECTS_COLOR_COLLISION,
IEEE80211_HW_MLO_MCAST_MULTI_LINK_TX,
IEEE80211_HW_DISALLOW_PUNCTURING,
- IEEE80211_HW_DISALLOW_PUNCTURING_5GHZ,
IEEE80211_HW_HANDLES_QUIET_CSA,
IEEE80211_HW_STRICT,
@@ -4133,6 +4136,15 @@ struct ieee80211_prep_tx_info {
* Statistics that the driver doesn't fill will be filled by mac80211.
* The callback can sleep.
*
+ * @link_sta_statistics: Get link statistics for this station. For example with
+ * beacon filtering, the statistics kept by mac80211 might not be
+ * accurate, so let the driver pre-fill the statistics. The driver can
+ * fill most of the values (indicating which by setting the filled
+ * bitmap), but not all of them make sense - see the source for which
+ * ones are possible.
+ * Statistics that the driver doesn't fill will be filled by mac80211.
+ * The callback can sleep.
+ *
* @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max),
* bursting) for a hardware TX queue.
* Returns a negative error code on failure.
@@ -4302,6 +4314,8 @@ struct ieee80211_prep_tx_info {
* @mgd_complete_tx: Notify the driver that the response frame for a previously
* transmitted frame announced with @mgd_prepare_tx was received, the data
* is filled similarly to @mgd_prepare_tx though the duration is not used.
+ * Note that this isn't always called for each mgd_prepare_tx() call, for
+ * example for SAE the 'confirm' messages can be on the air in any order.
*
* @mgd_protect_tdls_discover: Protect a TDLS discovery session. After sending
* a TDLS discovery-request, we expect a reply to arrive on the AP's
@@ -4466,6 +4480,8 @@ struct ieee80211_prep_tx_info {
* new links bitmaps may be 0 if going from/to a non-MLO situation.
* The @old array contains pointers to the old bss_conf structures
* that were already removed, in case they're needed.
+ * Note that removal of a link should always succeed, so the return value
+ * is ignored in a removal-only case.
* This callback can sleep.
* @change_sta_links: Change the valid links of a station, similar to
* @change_vif_links. This callback can sleep.
@@ -4508,7 +4524,7 @@ struct ieee80211_ops {
enum nl80211_iftype new_type, bool p2p);
void (*remove_interface)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
- int (*config)(struct ieee80211_hw *hw, u32 changed);
+ int (*config)(struct ieee80211_hw *hw, int radio_idx, u32 changed);
void (*bss_info_changed)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
@@ -4571,8 +4587,10 @@ struct ieee80211_ops {
void (*get_key_seq)(struct ieee80211_hw *hw,
struct ieee80211_key_conf *key,
struct ieee80211_key_seq *seq);
- int (*set_frag_threshold)(struct ieee80211_hw *hw, u32 value);
- int (*set_rts_threshold)(struct ieee80211_hw *hw, u32 value);
+ int (*set_frag_threshold)(struct ieee80211_hw *hw, int radio_idx,
+ u32 value);
+ int (*set_rts_threshold)(struct ieee80211_hw *hw, int radio_idx,
+ u32 value);
int (*sta_add)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
int (*sta_remove)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -4627,6 +4645,10 @@ struct ieee80211_ops {
s64 offset);
void (*reset_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
int (*tx_last_beacon)(struct ieee80211_hw *hw);
+ void (*link_sta_statistics)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ struct link_station_info *link_sinfo);
/**
* @ampdu_action:
@@ -4665,7 +4687,8 @@ struct ieee80211_ops {
int (*get_survey)(struct ieee80211_hw *hw, int idx,
struct survey_info *survey);
void (*rfkill_poll)(struct ieee80211_hw *hw);
- void (*set_coverage_class)(struct ieee80211_hw *hw, s16 coverage_class);
+ void (*set_coverage_class)(struct ieee80211_hw *hw, int radio_idx,
+ s16 coverage_class);
#ifdef CONFIG_NL80211_TESTMODE
int (*testmode_cmd)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len);
@@ -4680,8 +4703,10 @@ struct ieee80211_ops {
void (*channel_switch)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel_switch *ch_switch);
- int (*set_antenna)(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
- int (*get_antenna)(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
+ int (*set_antenna)(struct ieee80211_hw *hw, int radio_idx,
+ u32 tx_ant, u32 rx_ant);
+ int (*get_antenna)(struct ieee80211_hw *hw, int radio_idx,
+ u32 *tx_ant, u32 *rx_ant);
int (*remain_on_channel)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -6008,21 +6033,12 @@ void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf,
int tid, struct ieee80211_key_seq *seq);
/**
- * ieee80211_remove_key - remove the given key
- * @keyconf: the parameter passed with the set key
- *
- * Context: Must be called with the wiphy mutex held.
- *
- * Remove the given key. If the key was uploaded to the hardware at the
- * time this function is called, it is not deleted in the hardware but
- * instead assumed to have been removed already.
- */
-void ieee80211_remove_key(struct ieee80211_key_conf *keyconf);
-
-/**
* ieee80211_gtk_rekey_add - add a GTK key from rekeying during WoWLAN
* @vif: the virtual interface to add the key on
- * @keyconf: new key data
+ * @idx: the keyidx of the key
+ * @key_data: the key data
+ * @key_len: the length of the key data. Might be bigger than the actual
+ * key length, but not smaller (for the driver's convenience)
* @link_id: the link id of the key or -1 for non-MLO
*
* When GTK rekeying was done while the system was suspended, (a) new
@@ -6045,13 +6061,11 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf);
* for the new key for each TID to set up sequence counters properly.
*
* IMPORTANT: If this replaces a key that is present in the hardware,
- * then it will attempt to remove it during this call. In many cases
- * this isn't what you want, so call ieee80211_remove_key() first for
- * the key that's being replaced.
+ * then it will attempt to remove it during this call.
*/
struct ieee80211_key_conf *
ieee80211_gtk_rekey_add(struct ieee80211_vif *vif,
- struct ieee80211_key_conf *keyconf,
+ u8 idx, u8 *key_data, u8 key_len,
int link_id);
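A minimal, hedged sketch of how a WoWLAN resume path might report a rekeyed GTK with the new signature above; the mydrv_* name is hypothetical, and the key material is assumed to come from a firmware notification.

#include <linux/err.h>
#include <net/mac80211.h>

/* Sketch only: key_len may exceed the real key length, per the note above. */
static void mydrv_report_rekeyed_gtk(struct ieee80211_vif *vif, u8 keyidx,
				     u8 *key, u8 key_len, int link_id)
{
	struct ieee80211_key_conf *conf;

	conf = ieee80211_gtk_rekey_add(vif, keyidx, key, key_len, link_id);
	if (IS_ERR(conf))
		return;	/* mac80211 could not install the replacement key */

	/* the returned conf (e.g. conf->hw_key_idx) may be cached here */
}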
/**
@@ -7242,13 +7256,14 @@ void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif);
* ieee80211_ave_rssi - report the average RSSI for the specified interface
*
* @vif: the specified virtual interface
+ * @link_id: the link ID for MLO, or -1 for non-MLO
*
* Note: This function assumes that the given vif is valid.
*
* Return: The average RSSI value for the requested interface, or 0 if not
* applicable.
*/
-int ieee80211_ave_rssi(struct ieee80211_vif *vif);
+int ieee80211_ave_rssi(struct ieee80211_vif *vif, int link_id);
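Several ops above (config, set_frag_threshold, set_rts_threshold, set_coverage_class, set_antenna/get_antenna) now take a radio_idx. A hedged driver-side sketch follows, assuming radio_idx < 0 means "apply to all radios" and using a hypothetical two-radio private struct; it is illustrative, not a reference implementation.

#include <net/mac80211.h>

struct mydrv_priv {			/* hypothetical driver state */
	u32 rts_threshold[2];		/* one slot per radio */
};

static int mydrv_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
				   u32 value)
{
	struct mydrv_priv *priv = hw->priv;
	unsigned int i;

	if (radio_idx < 0) {		/* assumption: all radios */
		for (i = 0; i < ARRAY_SIZE(priv->rts_threshold); i++)
			priv->rts_threshold[i] = value;
		return 0;
	}

	if ((size_t)radio_idx >= ARRAY_SIZE(priv->rts_threshold))
		return -EINVAL;

	priv->rts_threshold[radio_idx] = value;
	return 0;
}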
/**
* ieee80211_report_wowlan_wakeup - report WoWLAN wakeup
diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
index 3ce56a816425..57df78cfbf82 100644
--- a/include/net/mana/gdma.h
+++ b/include/net/mana/gdma.h
@@ -10,6 +10,7 @@
#include "shm_channel.h"
#define GDMA_STATUS_MORE_ENTRIES 0x00000105
+#define GDMA_STATUS_CMD_UNSUPPORTED 0xffffffff
/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
* them are naturally aligned and hence don't need __packed.
@@ -58,9 +59,10 @@ enum gdma_eqe_type {
GDMA_EQE_HWC_INIT_EQ_ID_DB = 129,
GDMA_EQE_HWC_INIT_DATA = 130,
GDMA_EQE_HWC_INIT_DONE = 131,
- GDMA_EQE_HWC_SOC_RECONFIG = 132,
+ GDMA_EQE_HWC_FPGA_RECONFIG = 132,
GDMA_EQE_HWC_SOC_RECONFIG_DATA = 133,
GDMA_EQE_HWC_SOC_SERVICE = 134,
+ GDMA_EQE_HWC_RESET_REQUEST = 135,
GDMA_EQE_RNIC_QP_FATAL = 176,
};
@@ -388,7 +390,7 @@ struct gdma_context {
unsigned int max_num_queues;
unsigned int max_num_msix;
unsigned int num_msix_usable;
- struct gdma_irq_context *irq_contexts;
+ struct xarray irq_contexts;
/* L2 MTU */
u16 adapter_mtu;
@@ -403,6 +405,8 @@ struct gdma_context {
u32 test_event_eq_id;
bool is_pf;
+ bool in_service;
+
phys_addr_t bar0_pa;
void __iomem *bar0_va;
void __iomem *shm_base;
@@ -578,12 +582,24 @@ enum {
/* Driver can handle holes (zeros) in the device list */
#define GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP BIT(11)
+/* Driver supports dynamic MSI-X vector allocation */
+#define GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT BIT(13)
+
+/* Driver can self reset on EQE notification */
+#define GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE BIT(14)
+
+/* Driver can self reset on FPGA Reconfig EQE notification */
+#define GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE BIT(17)
+
#define GDMA_DRV_CAP_FLAGS1 \
(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG | \
GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT | \
- GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP)
+ GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP | \
+ GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT | \
+ GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE | \
+ GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE)
#define GDMA_DRV_CAP_FLAGS2 0
@@ -910,4 +926,9 @@ void mana_unregister_debugfs(void);
int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type event);
+int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state);
+int mana_gd_resume(struct pci_dev *pdev);
+
+bool mana_need_log(struct gdma_context *gc, int err);
+
#endif /* _GDMA_H */
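The new GDMA_DRV_CAP_FLAG_1_* bits are advertised through GDMA_DRV_CAP_FLAGS1 during capability negotiation. A hedged sketch of testing one negotiated bit; where the negotiated value is stored is driver-specific, so it is passed in here.

#include <net/mana/gdma.h>

static bool example_can_self_reset_on_eqe(u64 negotiated_cap_flags1)
{
	/* true only if both driver and device agreed on the capability */
	return negotiated_cap_flags1 & GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE;
}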
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index 9abb66461211..e1030a7d2daa 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -5,6 +5,7 @@
#define _MANA_H
#include <net/xdp.h>
+#include <net/net_shaper.h>
#include "gdma.h"
#include "hw_channel.h"
@@ -404,6 +405,65 @@ struct mana_ethtool_stats {
u64 rx_cqe_unknown_type;
};
+struct mana_ethtool_phy_stats {
+ /* Drop Counters */
+ u64 rx_pkt_drop_phy;
+ u64 tx_pkt_drop_phy;
+
+ /* Per TC traffic Counters */
+ u64 rx_pkt_tc0_phy;
+ u64 tx_pkt_tc0_phy;
+ u64 rx_pkt_tc1_phy;
+ u64 tx_pkt_tc1_phy;
+ u64 rx_pkt_tc2_phy;
+ u64 tx_pkt_tc2_phy;
+ u64 rx_pkt_tc3_phy;
+ u64 tx_pkt_tc3_phy;
+ u64 rx_pkt_tc4_phy;
+ u64 tx_pkt_tc4_phy;
+ u64 rx_pkt_tc5_phy;
+ u64 tx_pkt_tc5_phy;
+ u64 rx_pkt_tc6_phy;
+ u64 tx_pkt_tc6_phy;
+ u64 rx_pkt_tc7_phy;
+ u64 tx_pkt_tc7_phy;
+
+ u64 rx_byte_tc0_phy;
+ u64 tx_byte_tc0_phy;
+ u64 rx_byte_tc1_phy;
+ u64 tx_byte_tc1_phy;
+ u64 rx_byte_tc2_phy;
+ u64 tx_byte_tc2_phy;
+ u64 rx_byte_tc3_phy;
+ u64 tx_byte_tc3_phy;
+ u64 rx_byte_tc4_phy;
+ u64 tx_byte_tc4_phy;
+ u64 rx_byte_tc5_phy;
+ u64 tx_byte_tc5_phy;
+ u64 rx_byte_tc6_phy;
+ u64 tx_byte_tc6_phy;
+ u64 rx_byte_tc7_phy;
+ u64 tx_byte_tc7_phy;
+
+ /* Per TC pause Counters */
+ u64 rx_pause_tc0_phy;
+ u64 tx_pause_tc0_phy;
+ u64 rx_pause_tc1_phy;
+ u64 tx_pause_tc1_phy;
+ u64 rx_pause_tc2_phy;
+ u64 tx_pause_tc2_phy;
+ u64 rx_pause_tc3_phy;
+ u64 tx_pause_tc3_phy;
+ u64 rx_pause_tc4_phy;
+ u64 tx_pause_tc4_phy;
+ u64 rx_pause_tc5_phy;
+ u64 tx_pause_tc5_phy;
+ u64 rx_pause_tc6_phy;
+ u64 tx_pause_tc6_phy;
+ u64 rx_pause_tc7_phy;
+ u64 tx_pause_tc7_phy;
+};
+
struct mana_context {
struct gdma_dev *gdma_dev;
@@ -467,13 +527,22 @@ struct mana_port_context {
struct mutex vport_mutex;
int vport_use_count;
+ /* Net shaper handle */
+ struct net_shaper_handle handle;
+
u16 port_idx;
+ /* Currently configured speed (mbps) */
+ u32 speed;
+ /* Maximum speed supported by the SKU (mbps) */
+ u32 max_speed;
bool port_is_up;
bool port_st_save; /* Saved port state */
struct mana_ethtool_stats eth_stats;
+ struct mana_ethtool_phy_stats phy_stats;
+
/* Debugfs */
struct dentry *mana_port_debugfs;
};
@@ -501,6 +570,10 @@ struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
void mana_query_gf_stats(struct mana_port_context *apc);
+int mana_query_link_cfg(struct mana_port_context *apc);
+int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed,
+ int enable_clamping);
+void mana_query_phy_stats(struct mana_port_context *apc);
int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues);
void mana_pre_dealloc_rxbufs(struct mana_port_context *apc);
@@ -527,6 +600,9 @@ enum mana_command_code {
MANA_FENCE_RQ = 0x20006,
MANA_CONFIG_VPORT_RX = 0x20007,
MANA_QUERY_VPORT_CONFIG = 0x20008,
+ MANA_QUERY_LINK_CONFIG = 0x2000A,
+ MANA_SET_BW_CLAMP = 0x2000B,
+ MANA_QUERY_PHY_STAT = 0x2000c,
/* Privileged commands for the PF mode */
MANA_REGISTER_FILTER = 0x28000,
@@ -535,6 +611,35 @@ enum mana_command_code {
MANA_DEREGISTER_HW_PORT = 0x28004,
};
+/* Query Link Configuration */
+struct mana_query_link_config_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t vport;
+}; /* HW DATA */
+
+struct mana_query_link_config_resp {
+ struct gdma_resp_hdr hdr;
+ u32 qos_speed_mbps;
+ u8 qos_unconfigured;
+ u8 reserved1[3];
+ u32 link_speed_mbps;
+ u8 reserved2[4];
+}; /* HW DATA */
+
+/* Set Bandwidth Clamp */
+struct mana_set_bw_clamp_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t vport;
+ enum TRI_STATE enable_clamping;
+ u32 link_speed_mbps;
+}; /* HW DATA */
+
+struct mana_set_bw_clamp_resp {
+ struct gdma_resp_hdr hdr;
+ u8 qos_unconfigured;
+ u8 reserved[7];
+}; /* HW DATA */
+
/* Query Device Configuration */
struct mana_query_device_cfg_req {
struct gdma_req_hdr hdr;
@@ -689,6 +794,74 @@ struct mana_query_gf_stat_resp {
u64 tx_err_gdma;
}; /* HW DATA */
+/* Query phy stats */
+struct mana_query_phy_stat_req {
+ struct gdma_req_hdr hdr;
+ u64 req_stats;
+}; /* HW DATA */
+
+struct mana_query_phy_stat_resp {
+ struct gdma_resp_hdr hdr;
+ u64 reported_stats;
+
+ /* Aggregate Drop Counters */
+ u64 rx_pkt_drop_phy;
+ u64 tx_pkt_drop_phy;
+
+ /* Per TC (Traffic class) traffic Counters */
+ u64 rx_pkt_tc0_phy;
+ u64 tx_pkt_tc0_phy;
+ u64 rx_pkt_tc1_phy;
+ u64 tx_pkt_tc1_phy;
+ u64 rx_pkt_tc2_phy;
+ u64 tx_pkt_tc2_phy;
+ u64 rx_pkt_tc3_phy;
+ u64 tx_pkt_tc3_phy;
+ u64 rx_pkt_tc4_phy;
+ u64 tx_pkt_tc4_phy;
+ u64 rx_pkt_tc5_phy;
+ u64 tx_pkt_tc5_phy;
+ u64 rx_pkt_tc6_phy;
+ u64 tx_pkt_tc6_phy;
+ u64 rx_pkt_tc7_phy;
+ u64 tx_pkt_tc7_phy;
+
+ u64 rx_byte_tc0_phy;
+ u64 tx_byte_tc0_phy;
+ u64 rx_byte_tc1_phy;
+ u64 tx_byte_tc1_phy;
+ u64 rx_byte_tc2_phy;
+ u64 tx_byte_tc2_phy;
+ u64 rx_byte_tc3_phy;
+ u64 tx_byte_tc3_phy;
+ u64 rx_byte_tc4_phy;
+ u64 tx_byte_tc4_phy;
+ u64 rx_byte_tc5_phy;
+ u64 tx_byte_tc5_phy;
+ u64 rx_byte_tc6_phy;
+ u64 tx_byte_tc6_phy;
+ u64 rx_byte_tc7_phy;
+ u64 tx_byte_tc7_phy;
+
+ /* Per TC (Traffic Class) pause Counters */
+ u64 rx_pause_tc0_phy;
+ u64 tx_pause_tc0_phy;
+ u64 rx_pause_tc1_phy;
+ u64 tx_pause_tc1_phy;
+ u64 rx_pause_tc2_phy;
+ u64 tx_pause_tc2_phy;
+ u64 rx_pause_tc3_phy;
+ u64 tx_pause_tc3_phy;
+ u64 rx_pause_tc4_phy;
+ u64 tx_pause_tc4_phy;
+ u64 rx_pause_tc5_phy;
+ u64 tx_pause_tc5_phy;
+ u64 rx_pause_tc6_phy;
+ u64 tx_pause_tc6_phy;
+ u64 rx_pause_tc7_phy;
+ u64 tx_pause_tc7_phy;
+}; /* HW DATA */
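A hedged sketch of how an ethtool-style handler might use the new link-config and bandwidth-clamp helpers declared above. It assumes mana_query_link_cfg() refreshes apc->speed/apc->max_speed and that TRI_STATE_TRUE is the existing tri-state "enable" value in mana.h; error handling is simplified.

#include <net/mana/mana.h>

static int example_clamp_port_speed(struct mana_port_context *apc,
				    u32 speed_mbps)
{
	int err;

	err = mana_query_link_cfg(apc);	/* assumed to update apc->max_speed */
	if (err)
		return err;

	if (speed_mbps > apc->max_speed)
		return -EINVAL;

	return mana_set_bw_clamp(apc, speed_mbps, TRI_STATE_TRUE);
}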
+
/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req_v2 {
struct gdma_req_hdr hdr;
diff --git a/include/net/mctp.h b/include/net/mctp.h
index 07d458990113..c3207ce98f07 100644
--- a/include/net/mctp.h
+++ b/include/net/mctp.h
@@ -69,7 +69,10 @@ struct mctp_sock {
/* bind() params */
unsigned int bind_net;
- mctp_eid_t bind_addr;
+ mctp_eid_t bind_local_addr;
+ mctp_eid_t bind_peer_addr;
+ unsigned int bind_peer_net;
+ bool bind_peer_set;
__u8 bind_type;
/* sendmsg()/recvmsg() uses struct sockaddr_mctp_ext */
@@ -183,8 +186,8 @@ struct mctp_sk_key {
struct mctp_skb_cb {
unsigned int magic;
unsigned int net;
- int ifindex; /* extended/direct addressing if set */
- mctp_eid_t src;
+ /* fields below provide extended addressing for ingress to recvmsg() */
+ int ifindex;
unsigned char halen;
unsigned char haddr[MAX_ADDR_LEN];
};
@@ -222,6 +225,8 @@ struct mctp_flow {
struct mctp_sk_key *key;
};
+struct mctp_dst;
+
/* Route definition.
*
* These are held in the pernet->mctp.routes list, with RCU protection for
@@ -229,16 +234,25 @@ struct mctp_flow {
* dropped on NETDEV_UNREGISTER events.
*
* Updates to the route table are performed under rtnl; all reads under RCU,
- * so routes cannot be referenced over a RCU grace period. Specifically: A
- * caller cannot block between mctp_route_lookup and mctp_route_release()
+ * so routes cannot be referenced over an RCU grace period.
*/
struct mctp_route {
mctp_eid_t min, max;
unsigned char type;
+
unsigned int mtu;
- struct mctp_dev *dev;
- int (*output)(struct mctp_route *route,
+
+ enum {
+ MCTP_ROUTE_DIRECT,
+ MCTP_ROUTE_GATEWAY,
+ } dst_type;
+ union {
+ struct mctp_dev *dev;
+ struct mctp_fq_addr gateway;
+ };
+
+ int (*output)(struct mctp_dst *dst,
struct sk_buff *skb);
struct list_head list;
@@ -246,12 +260,35 @@ struct mctp_route {
struct rcu_head rcu;
};
+/* Route lookup result: dst. Represents the results of a routing decision,
+ * but is only held over the individual routing operation.
+ *
+ * Will typically be stored on the caller stack, and must be released after
+ * usage.
+ */
+struct mctp_dst {
+ struct mctp_dev *dev;
+ unsigned int mtu;
+ mctp_eid_t nexthop;
+
+ /* set for direct addressing */
+ unsigned char halen;
+ unsigned char haddr[MAX_ADDR_LEN];
+
+ int (*output)(struct mctp_dst *dst, struct sk_buff *skb);
+};
+
+int mctp_dst_from_extaddr(struct mctp_dst *dst, struct net *net, int ifindex,
+ unsigned char halen, const unsigned char *haddr);
+
/* route interfaces */
-struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
- mctp_eid_t daddr);
+int mctp_route_lookup(struct net *net, unsigned int dnet,
+ mctp_eid_t daddr, struct mctp_dst *dst);
+
+void mctp_dst_release(struct mctp_dst *dst);
/* always takes ownership of skb */
-int mctp_local_output(struct sock *sk, struct mctp_route *rt,
+int mctp_local_output(struct sock *sk, struct mctp_dst *dst,
struct sk_buff *skb, mctp_eid_t daddr, u8 req_tag);
void mctp_key_unref(struct mctp_sk_key *key);
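A hedged sketch of the dst-based transmit flow implied by the declarations above: the lookup fills a stack-allocated mctp_dst that must be released after the output call, and mctp_local_output() always consumes the skb. MCTP_TAG_OWNER is used here to request an owned tag; the example_* name is illustrative only.

#include <net/mctp.h>

static int example_mctp_send(struct net *net, struct sock *sk,
			     struct sk_buff *skb, unsigned int dnet,
			     mctp_eid_t daddr)
{
	struct mctp_dst dst;
	int rc;

	rc = mctp_route_lookup(net, dnet, daddr, &dst);
	if (rc)
		return rc;	/* no route; caller still owns skb */

	/* mctp_local_output() always takes ownership of skb */
	rc = mctp_local_output(sk, &dst, skb, daddr, MCTP_TAG_OWNER);
	mctp_dst_release(&dst);
	return rc;
}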
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 3c88d5bc5eed..d38783a2ce57 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -60,15 +60,6 @@ enum {
#include <net/neighbour.h>
-/* Set to 3 to get tracing... */
-#define ND_DEBUG 1
-
-#define ND_PRINTK(val, level, fmt, ...) \
-do { \
- if (val <= ND_DEBUG) \
- net_##level##_ratelimited(fmt, ##__VA_ARGS__); \
-} while (0)
-
struct ctl_table;
struct inet6_dev;
struct net_device;
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 9a832cab5b1d..4a30bd458c5a 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -176,12 +176,17 @@ struct neigh_ops {
};
struct pneigh_entry {
- struct pneigh_entry *next;
+ struct pneigh_entry __rcu *next;
possible_net_t net;
struct net_device *dev;
netdevice_tracker dev_tracker;
+ union {
+ struct list_head free_node;
+ struct rcu_head rcu;
+ };
u32 flags;
u8 protocol;
+ bool permanent;
u32 key[];
};
@@ -235,7 +240,8 @@ struct neigh_table {
unsigned long last_rand;
struct neigh_statistics __percpu *stats;
struct neigh_hash_table __rcu *nht;
- struct pneigh_entry **phash_buckets;
+ struct mutex phash_lock;
+ struct pneigh_entry __rcu **phash_buckets;
};
static inline int neigh_parms_family(struct neigh_parms *p)
@@ -260,13 +266,15 @@ static inline void *neighbour_priv(const struct neighbour *n)
#define NEIGH_UPDATE_F_EXT_LEARNED BIT(5)
#define NEIGH_UPDATE_F_ISROUTER BIT(6)
#define NEIGH_UPDATE_F_ADMIN BIT(7)
+#define NEIGH_UPDATE_F_EXT_VALIDATED BIT(8)
/* In-kernel representation for NDA_FLAGS_EXT flags: */
#define NTF_OLD_MASK 0xff
#define NTF_EXT_SHIFT 8
-#define NTF_EXT_MASK (NTF_EXT_MANAGED)
+#define NTF_EXT_MASK (NTF_EXT_MANAGED | NTF_EXT_EXT_VALIDATED)
#define NTF_MANAGED (NTF_EXT_MANAGED << NTF_EXT_SHIFT)
+#define NTF_EXT_VALIDATED (NTF_EXT_EXT_VALIDATED << NTF_EXT_SHIFT)
extern const struct nla_policy nda_policy[];
@@ -373,10 +381,10 @@ unsigned long neigh_rand_reach_time(unsigned long base);
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
struct sk_buff *skb);
struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net,
- const void *key, struct net_device *dev,
- int creat);
-struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, struct net *net,
- const void *key, struct net_device *dev);
+ const void *key, struct net_device *dev);
+int pneigh_create(struct neigh_table *tbl, struct net *net, const void *key,
+ struct net_device *dev, u32 flags, u8 protocol,
+ bool permanent);
int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key,
struct net_device *dev);
diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h
index ba2eaf39089b..6e835972abd1 100644
--- a/include/net/netdev_queues.h
+++ b/include/net/netdev_queues.h
@@ -294,6 +294,15 @@ netdev_txq_completed_mb(struct netdev_queue *dev_queue,
netif_txq_try_stop(_txq, get_desc, start_thrs); \
})
+static inline void netif_subqueue_sent(const struct net_device *dev,
+ unsigned int idx, unsigned int bytes)
+{
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(dev, idx);
+ netdev_tx_sent_queue(txq, bytes);
+}
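A hedged sketch of pairing the new netif_subqueue_sent() with the existing netif_subqueue_maybe_stop() in a TX completion/accounting path; the threshold constants and the ring_space value are hypothetical and would come from the driver's descriptor ring.

#include <net/netdev_queues.h>

#define EXAMPLE_TX_STOP_THRESH	8	/* hypothetical thresholds */
#define EXAMPLE_TX_WAKE_THRESH	16

static void example_tx_sent_accounting(struct net_device *dev,
				       unsigned int qidx,
				       struct sk_buff *skb,
				       unsigned int ring_space)
{
	/* count the bytes handed to hardware on this subqueue ... */
	netif_subqueue_sent(dev, qidx, skb->len);

	/* ... and stop the subqueue if descriptors are running low */
	netif_subqueue_maybe_stop(dev, qidx, ring_space,
				  EXAMPLE_TX_STOP_THRESH,
				  EXAMPLE_TX_WAKE_THRESH);
}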
+
#define netif_subqueue_maybe_stop(dev, idx, get_desc, stop_thrs, start_thrs) \
({ \
struct netdev_queue *_txq; \
diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
index 2c8c2b023848..8d65ffbf57de 100644
--- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
@@ -13,9 +13,6 @@
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp;
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp;
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
-#ifdef CONFIG_NF_CT_PROTO_DCCP
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp;
-#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp;
#endif
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index ca26274196b9..aa0a7c82199e 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -18,7 +18,6 @@
#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
-#include <linux/netfilter/nf_conntrack_dccp.h>
#include <linux/netfilter/nf_conntrack_sctp.h>
#include <linux/netfilter/nf_conntrack_proto_gre.h>
@@ -31,7 +30,6 @@ struct nf_ct_udp {
/* per conntrack: protocol private data */
union nf_conntrack_proto {
/* insert conntrack proto private data here */
- struct nf_ct_dccp dccp;
struct ip_ct_sctp sctp;
struct ip_ct_tcp tcp;
struct nf_ct_udp udp;
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 1f47bef51722..6929f8daf1ed 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -117,11 +117,6 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
const struct nf_hook_state *state);
-int nf_conntrack_dccp_packet(struct nf_conn *ct,
- struct sk_buff *skb,
- unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- const struct nf_hook_state *state);
int nf_conntrack_sctp_packet(struct nf_conn *ct,
struct sk_buff *skb,
unsigned int dataoff,
@@ -137,7 +132,6 @@ void nf_conntrack_generic_init_net(struct net *net);
void nf_conntrack_tcp_init_net(struct net *net);
void nf_conntrack_udp_init_net(struct net *net);
void nf_conntrack_gre_init_net(struct net *net);
-void nf_conntrack_dccp_init_net(struct net *net);
void nf_conntrack_sctp_init_net(struct net *net);
void nf_conntrack_icmp_init_net(struct net *net);
void nf_conntrack_icmpv6_init_net(struct net *net);
@@ -223,13 +217,6 @@ static inline bool nf_conntrack_tcp_established(const struct nf_conn *ct)
}
#endif
-#ifdef CONFIG_NF_CT_PROTO_DCCP
-static inline struct nf_dccp_net *nf_dccp_pernet(struct net *net)
-{
- return &net->ct.nf_ct_proto.dccp;
-}
-#endif
-
#ifdef CONFIG_NF_CT_PROTO_SCTP
static inline struct nf_sctp_net *nf_sctp_pernet(struct net *net)
{
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
index e55eedc84ed7..00506792a06d 100644
--- a/include/net/netfilter/nf_log.h
+++ b/include/net/netfilter/nf_log.h
@@ -59,6 +59,9 @@ extern int sysctl_nf_log_all_netns;
int nf_log_register(u_int8_t pf, struct nf_logger *logger);
void nf_log_unregister(struct nf_logger *logger);
+/* Check if any logger is registered for a given protocol family. */
+bool nf_log_is_registered(u_int8_t pf);
+
int nf_log_set(struct net *net, u_int8_t pf, const struct nf_logger *logger);
void nf_log_unset(struct net *net, const struct nf_logger *logger);
diff --git a/include/net/netfilter/nf_reject.h b/include/net/netfilter/nf_reject.h
index 7c669792fb9c..f1db33bc6bf8 100644
--- a/include/net/netfilter/nf_reject.h
+++ b/include/net/netfilter/nf_reject.h
@@ -34,7 +34,6 @@ static inline bool nf_reject_verify_csum(struct sk_buff *skb, int dataoff,
/* Protocols with partial checksums. */
case IPPROTO_UDPLITE:
- case IPPROTO_DCCP:
return false;
}
return true;
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 5e49619ae49c..891e43a01bdc 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -459,19 +459,13 @@ struct nft_set_ext;
* control plane functions.
*/
struct nft_set_ops {
- bool (*lookup)(const struct net *net,
+ const struct nft_set_ext * (*lookup)(const struct net *net,
const struct nft_set *set,
+ const u32 *key);
+ const struct nft_set_ext * (*update)(struct nft_set *set,
const u32 *key,
- const struct nft_set_ext **ext);
- bool (*update)(struct nft_set *set,
- const u32 *key,
- struct nft_elem_priv *
- (*new)(struct nft_set *,
- const struct nft_expr *,
- struct nft_regs *),
const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_set_ext **ext);
+ struct nft_regs *regs);
bool (*delete)(const struct nft_set *set,
const u32 *key);
@@ -1939,11 +1933,6 @@ static inline u64 nft_net_tstamp(const struct net *net)
#define __NFT_REDUCE_READONLY 1UL
#define NFT_REDUCE_READONLY (void *)__NFT_REDUCE_READONLY
-static inline bool nft_reduce_is_readonly(const struct nft_expr *expr)
-{
- return expr->ops->reduce == NFT_REDUCE_READONLY;
-}
-
void nft_reg_track_update(struct nft_regs_track *track,
const struct nft_expr *expr, u8 dreg, u8 len);
void nft_reg_track_cancel(struct nft_regs_track *track, u8 dreg, u8 len);
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index 03b6165756fc..6c2f483d9828 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -94,34 +94,41 @@ extern const struct nft_set_type nft_set_pipapo_type;
extern const struct nft_set_type nft_set_pipapo_avx2_type;
#ifdef CONFIG_MITIGATION_RETPOLINE
-bool nft_rhash_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext);
-bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext);
-bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext);
-bool nft_hash_lookup_fast(const struct net *net,
- const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext);
-bool nft_hash_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext);
-bool nft_set_do_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext);
+const struct nft_set_ext *
+nft_rhash_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key);
+const struct nft_set_ext *
+nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key);
+const struct nft_set_ext *
+nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key);
+const struct nft_set_ext *
+nft_hash_lookup_fast(const struct net *net, const struct nft_set *set,
+ const u32 *key);
+const struct nft_set_ext *
+nft_hash_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key);
+const struct nft_set_ext *
+nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key);
#else
-static inline bool
+static inline const struct nft_set_ext *
nft_set_do_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext)
+ const u32 *key)
{
- return set->ops->lookup(net, set, key, ext);
+ return set->ops->lookup(net, set, key);
}
#endif
/* called from nft_pipapo_avx2.c */
-bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext);
+const struct nft_set_ext *
+nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key);
/* called from nft_set_pipapo.c */
-bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext);
+const struct nft_set_ext *
+nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key);
void nft_counter_init_seqcount(void);
@@ -181,4 +188,7 @@ void nft_objref_eval(const struct nft_expr *expr, struct nft_regs *regs,
const struct nft_pktinfo *pkt);
void nft_objref_map_eval(const struct nft_expr *expr, struct nft_regs *regs,
const struct nft_pktinfo *pkt);
+struct nft_elem_priv *nft_dynset_new(struct nft_set *set,
+ const struct nft_expr *expr,
+ struct nft_regs *regs);
#endif /* _NET_NF_TABLES_CORE_H */
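A hedged sketch of a caller adapted to the new lookup convention above: the element extension pointer is now the return value rather than being passed back through an out-parameter.

#include <net/netfilter/nf_tables_core.h>

static bool example_set_contains(const struct net *net,
				 const struct nft_set *set, const u32 *key)
{
	const struct nft_set_ext *ext;

	ext = nft_set_do_lookup(net, set, key);

	/* NULL means no match; a non-NULL ext carries the same element
	 * extensions the old **ext out-parameter used to return.
	 */
	return ext != NULL;
}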
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 90a560dc167a..1a8356ca4b78 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -68,6 +68,8 @@
* nlmsg_for_each_msg() loop over all messages
* nlmsg_validate() validate netlink message incl. attrs
* nlmsg_for_each_attr() loop over all attributes
+ * nlmsg_for_each_attr_type() loop over all attributes with the
+ * given type
*
* Misc:
* nlmsg_report() report back to application?
@@ -967,6 +969,18 @@ static inline u32 nlmsg_seq(const struct nlmsghdr *nlh)
nlmsg_attrlen(nlh, hdrlen), rem)
/**
+ * nlmsg_for_each_attr_type - iterate over a stream of attributes
+ * @pos: loop counter, set to the current attribute
+ * @type: required attribute type for @pos
+ * @nlh: netlink message header
+ * @hdrlen: length of the family specific header
+ * @rem: initialized to len, holds bytes currently remaining in stream
+ */
+#define nlmsg_for_each_attr_type(pos, type, nlh, hdrlen, rem) \
+ nlmsg_for_each_attr(pos, nlh, hdrlen, rem) \
+ if (nla_type(pos) == type)
+
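A hedged usage sketch for the new iterator: counting how many attributes of one type a message carries. The loop body only runs for attributes whose nla_type() matches.

#include <net/netlink.h>

static int example_count_attrs_of_type(const struct nlmsghdr *nlh,
				       int hdrlen, int type)
{
	struct nlattr *attr;
	int rem, n = 0;

	nlmsg_for_each_attr_type(attr, type, nlh, hdrlen, rem)
		n++;

	return n;
}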
+/**
* nlmsg_put - Add a new netlink message to an skb
* @skb: socket buffer to store message in
* @portid: netlink PORTID of requesting application
diff --git a/include/net/netmem.h b/include/net/netmem.h
index 386164fb9c18..f7dacc9e75fd 100644
--- a/include/net/netmem.h
+++ b/include/net/netmem.h
@@ -12,6 +12,50 @@
#include <linux/mm.h>
#include <net/net_debug.h>
+/* These fields in struct page are used by the page_pool and net stack:
+ *
+ * struct {
+ * unsigned long pp_magic;
+ * struct page_pool *pp;
+ * unsigned long _pp_mapping_pad;
+ * unsigned long dma_addr;
+ * atomic_long_t pp_ref_count;
+ * };
+ *
+ * We mirror the page_pool fields here so the page_pool can access these
+ * fields without worrying whether the underlying fields belong to a
+ * page or netmem_desc.
+ *
+ * CAUTION: Do not update the fields in netmem_desc without also
+ * updating the anonymous aliasing union in struct net_iov.
+ */
+struct netmem_desc {
+ unsigned long _flags;
+ unsigned long pp_magic;
+ struct page_pool *pp;
+ unsigned long _pp_mapping_pad;
+ unsigned long dma_addr;
+ atomic_long_t pp_ref_count;
+};
+
+#define NETMEM_DESC_ASSERT_OFFSET(pg, desc) \
+ static_assert(offsetof(struct page, pg) == \
+ offsetof(struct netmem_desc, desc))
+NETMEM_DESC_ASSERT_OFFSET(flags, _flags);
+NETMEM_DESC_ASSERT_OFFSET(pp_magic, pp_magic);
+NETMEM_DESC_ASSERT_OFFSET(pp, pp);
+NETMEM_DESC_ASSERT_OFFSET(_pp_mapping_pad, _pp_mapping_pad);
+NETMEM_DESC_ASSERT_OFFSET(dma_addr, dma_addr);
+NETMEM_DESC_ASSERT_OFFSET(pp_ref_count, pp_ref_count);
+#undef NETMEM_DESC_ASSERT_OFFSET
+
+/*
+ * Since struct netmem_desc uses space within struct page, its size
+ * must be checked to avoid conflicting with other members of struct page,
+ * until struct netmem_desc gets its own instance allocated from slab.
+ */
+static_assert(sizeof(struct netmem_desc) <= offsetof(struct page, _refcount));
+
/* net_iov */
DECLARE_STATIC_KEY_FALSE(page_pool_mem_providers);
@@ -30,13 +74,48 @@ enum net_iov_type {
NET_IOV_MAX = ULONG_MAX
};
+/* A memory descriptor representing abstract networking I/O vectors,
+ * generally for non-page memory that doesn't have a corresponding
+ * struct page and needs to be explicitly allocated through slab.
+ *
+ * net_iovs are allocated and used by networking code, and the size of
+ * the chunk is PAGE_SIZE.
+ *
+ * This memory can be any form of memory not backed by struct page. Examples
+ * include imported dmabuf memory and imported io_uring memory. See
+ * net_iov_type for all the supported types.
+ *
+ * @pp_magic: pp field, similar to the one in struct page/struct
+ * netmem_desc.
+ * @pp: the pp this net_iov belongs to, if any.
+ * @dma_addr: the DMA address of the net_iov. Needed for the network
+ * card to send/receive this net_iov.
+ * @pp_ref_count: the pp ref count of this net_iov, exactly the same
+ * usage as struct page/struct netmem_desc.
+ * @owner: the net_iov_area this net_iov belongs to, if any.
+ * @type: the type of the memory. Different types of net_iovs are
+ * supported.
+ */
struct net_iov {
- enum net_iov_type type;
- unsigned long pp_magic;
- struct page_pool *pp;
+ union {
+ struct netmem_desc desc;
+
+ /* XXX: The following part should be removed once all
+ * the references to them are converted so as to be
+ * accessed via netmem_desc e.g. niov->desc.pp instead
+ * of niov->pp.
+ */
+ struct {
+ unsigned long _flags;
+ unsigned long pp_magic;
+ struct page_pool *pp;
+ unsigned long _pp_mapping_pad;
+ unsigned long dma_addr;
+ atomic_long_t pp_ref_count;
+ };
+ };
struct net_iov_area *owner;
- unsigned long dma_addr;
- atomic_long_t pp_ref_count;
+ enum net_iov_type type;
};
struct net_iov_area {
@@ -48,27 +127,22 @@ struct net_iov_area {
unsigned long base_virtual;
};
-/* These fields in struct page are used by the page_pool and net stack:
- *
- * struct {
- * unsigned long pp_magic;
- * struct page_pool *pp;
- * unsigned long _pp_mapping_pad;
- * unsigned long dma_addr;
- * atomic_long_t pp_ref_count;
- * };
- *
- * We mirror the page_pool fields here so the page_pool can access these fields
- * without worrying whether the underlying fields belong to a page or net_iov.
+/* net_iov is union'ed with struct netmem_desc mirroring struct page, so
+ * the page_pool can access these fields without worrying whether the
+ * underlying fields are accessed via netmem_desc or directly via
+ * net_iov, until all the references to them are converted so as to be
+ * accessed via netmem_desc e.g. niov->desc.pp instead of niov->pp.
*
- * The non-net stack fields of struct page are private to the mm stack and must
- * never be mirrored to net_iov.
+ * The non-net stack fields of struct page are private to the mm stack
+ * and must never be mirrored to net_iov.
*/
-#define NET_IOV_ASSERT_OFFSET(pg, iov) \
- static_assert(offsetof(struct page, pg) == \
+#define NET_IOV_ASSERT_OFFSET(desc, iov) \
+ static_assert(offsetof(struct netmem_desc, desc) == \
offsetof(struct net_iov, iov))
+NET_IOV_ASSERT_OFFSET(_flags, _flags);
NET_IOV_ASSERT_OFFSET(pp_magic, pp_magic);
NET_IOV_ASSERT_OFFSET(pp, pp);
+NET_IOV_ASSERT_OFFSET(_pp_mapping_pad, _pp_mapping_pad);
NET_IOV_ASSERT_OFFSET(dma_addr, dma_addr);
NET_IOV_ASSERT_OFFSET(pp_ref_count, pp_ref_count);
#undef NET_IOV_ASSERT_OFFSET
@@ -89,8 +163,7 @@ static inline unsigned int net_iov_idx(const struct net_iov *niov)
* typedef netmem_ref - a nonexistent type marking a reference to generic
* network memory.
*
- * A netmem_ref currently is always a reference to a struct page. This
- * abstraction is introduced so support for new memory types can be added.
+ * A netmem_ref can be a struct page* or a struct net_iov* underneath.
*
* Use the supplied helpers to obtain the underlying memory pointer and fields.
*/
@@ -117,9 +190,6 @@ static inline struct page *__netmem_to_page(netmem_ref netmem)
return (__force struct page *)netmem;
}
-/* This conversion fails (returns NULL) if the netmem_ref is not struct page
- * backed.
- */
static inline struct page *netmem_to_page(netmem_ref netmem)
{
if (WARN_ON_ONCE(netmem_is_net_iov(netmem)))
@@ -143,10 +213,9 @@ static inline netmem_ref net_iov_to_netmem(struct net_iov *niov)
return (__force netmem_ref)((unsigned long)niov | NET_IOV);
}
-static inline netmem_ref page_to_netmem(struct page *page)
-{
- return (__force netmem_ref)page;
-}
+#define page_to_netmem(p) (_Generic((p), \
+ const struct page * : (__force const netmem_ref)(p), \
+ struct page * : (__force netmem_ref)(p)))
/**
* virt_to_netmem - convert virtual memory pointer to a netmem reference
@@ -178,11 +247,61 @@ static inline unsigned long netmem_pfn_trace(netmem_ref netmem)
return page_to_pfn(netmem_to_page(netmem));
}
+/**
+ * __netmem_to_nmdesc - unsafely get pointer to the &netmem_desc backing
+ * @netmem
+ * @netmem: netmem reference to convert
+ *
+ * Unsafe version that can be used only when @netmem is always backed by
+ * system memory, performs faster and generates smaller object code (no
+ * check for the LSB, no WARN). When @netmem points to IOV, provokes
+ * undefined behaviour.
+ *
+ * Return: pointer to the &netmem_desc (garbage if @netmem is not backed
+ * by system memory).
+ */
+static inline struct netmem_desc *__netmem_to_nmdesc(netmem_ref netmem)
+{
+ return (__force struct netmem_desc *)netmem;
+}
+
+/* __netmem_clear_lsb - convert netmem_ref to struct net_iov * for access to
+ * common fields.
+ * @netmem: netmem reference to extract as net_iov.
+ *
+ * All the sub types of netmem_ref (page, net_iov) have the same pp, pp_magic,
+ * dma_addr, and pp_ref_count fields at the same offsets. Thus, we can access
+ * these fields without a type check to make sure that the underlying mem is
+ * net_iov or page.
+ *
+ * The resulting value of this function can only be used to access the fields
+ * that are NET_IOV_ASSERT_OFFSET'd. Accessing any other fields will result in
+ * undefined behavior.
+ *
+ * Return: the netmem_ref cast to net_iov* regardless of its underlying type.
+ */
static inline struct net_iov *__netmem_clear_lsb(netmem_ref netmem)
{
return (struct net_iov *)((__force unsigned long)netmem & ~NET_IOV);
}
+/* XXX: How to extract netmem_desc from page must be changed, once
+ * netmem_desc no longer overlays on page and will be allocated through
+ * slab.
+ */
+#define __pp_page_to_nmdesc(p) (_Generic((p), \
+ const struct page * : (const struct netmem_desc *)(p), \
+ struct page * : (struct netmem_desc *)(p)))
+
+/* CAUTION: Check if the page is a pp page before calling this helper or
+ * know it's a pp page.
+ */
+#define pp_page_to_nmdesc(p) \
+({ \
+ DEBUG_NET_WARN_ON_ONCE(!page_pool_page_is_pp(p)); \
+ __pp_page_to_nmdesc(p); \
+})
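A hedged sketch of the intended use of the helpers above: once a netmem_ref (or a page known to belong to a page_pool) has been resolved to a netmem_desc, the shared pp fields are read through that descriptor rather than through struct page directly.

#include <net/netmem.h>

/* Only valid when netmem is known to be system-memory (page) backed,
 * mirroring how __netmem_get_pp() below uses __netmem_to_nmdesc().
 */
static struct page_pool *example_netmem_pool(netmem_ref netmem)
{
	return __netmem_to_nmdesc(netmem)->pp;
}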
+
/**
* __netmem_get_pp - unsafely get pointer to the &page_pool backing @netmem
* @netmem: netmem reference to get the pointer from
@@ -196,7 +315,7 @@ static inline struct net_iov *__netmem_clear_lsb(netmem_ref netmem)
*/
static inline struct page_pool *__netmem_get_pp(netmem_ref netmem)
{
- return __netmem_to_page(netmem)->pp;
+ return __netmem_to_nmdesc(netmem)->pp;
}
static inline struct page_pool *netmem_get_pp(netmem_ref netmem)
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index bae914815aa3..ab74b5ed0b01 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -7,9 +7,6 @@
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
-#ifdef CONFIG_NF_CT_PROTO_DCCP
-#include <linux/netfilter/nf_conntrack_dccp.h>
-#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
#include <linux/netfilter/nf_conntrack_sctp.h>
#endif
@@ -50,13 +47,6 @@ struct nf_icmp_net {
unsigned int timeout;
};
-#ifdef CONFIG_NF_CT_PROTO_DCCP
-struct nf_dccp_net {
- u8 dccp_loose;
- unsigned int dccp_timeout[CT_DCCP_MAX + 1];
-};
-#endif
-
#ifdef CONFIG_NF_CT_PROTO_SCTP
struct nf_sctp_net {
unsigned int timeouts[SCTP_CONNTRACK_MAX];
@@ -82,9 +72,6 @@ struct nf_ip_net {
struct nf_udp_net udp;
struct nf_icmp_net icmp;
struct nf_icmp_net icmpv6;
-#ifdef CONFIG_NF_CT_PROTO_DCCP
- struct nf_dccp_net dccp;
-#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
struct nf_sctp_net sctp;
#endif
diff --git a/include/net/netns/mctp.h b/include/net/netns/mctp.h
index 1db8f9aaddb4..89555f90b97b 100644
--- a/include/net/netns/mctp.h
+++ b/include/net/netns/mctp.h
@@ -6,19 +6,25 @@
#ifndef __NETNS_MCTP_H__
#define __NETNS_MCTP_H__
+#include <linux/hash.h>
+#include <linux/hashtable.h>
#include <linux/mutex.h>
#include <linux/types.h>
+#define MCTP_BINDS_BITS 7
+
struct netns_mctp {
/* Only updated under RTNL, entries freed via RCU */
struct list_head routes;
- /* Bound sockets: list of sockets bound by type.
- * This list is updated from non-atomic contexts (under bind_lock),
- * and read (under rcu) in packet rx
+ /* Bound sockets: hash table of sockets, keyed by
+ * (type, src_eid, dest_eid).
+ * Specific src_eid/dest_eid entries also have an entry for
+ * MCTP_ADDR_ANY. This list is updated from non-atomic contexts
+ * (under bind_lock), and read (under rcu) in packet rx.
*/
struct mutex bind_lock;
- struct hlist_head binds;
+ DECLARE_HASHTABLE(binds, MCTP_BINDS_BITS);
/* tag allocations. This list is read and updated from atomic contexts,
* but elements are free()ed after a RCU grace-period
@@ -34,4 +40,10 @@ struct netns_mctp {
struct list_head neighbours;
};
+static inline u32 mctp_bind_hash(u8 type, u8 local_addr, u8 peer_addr)
+{
+ return hash_32(type | (u32)local_addr << 8 | (u32)peer_addr << 16,
+ MCTP_BINDS_BITS);
+}
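A hedged sketch of how a receive-path lookup might use mctp_bind_hash() to pick a bucket, first for the specific (type, local, peer) triple and then, per the comment above, again with the MCTP_ADDR_ANY wildcard entries; the example_* name is illustrative.

#include <net/netns/mctp.h>

static struct hlist_head *example_bind_bucket(struct netns_mctp *ns,
					      u8 type, u8 local_eid,
					      u8 peer_eid)
{
	return &ns->binds[mctp_bind_hash(type, local_eid, peer_eid)];
}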
+
#endif /* __NETNS_MCTP_H__ */
diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h
index 93f2c31baf9b..db180626be06 100644
--- a/include/net/page_pool/helpers.h
+++ b/include/net/page_pool/helpers.h
@@ -153,6 +153,13 @@ static inline netmem_ref page_pool_dev_alloc_netmem(struct page_pool *pool,
return page_pool_alloc_netmem(pool, offset, size, gfp);
}
+static inline netmem_ref page_pool_dev_alloc_netmems(struct page_pool *pool)
+{
+ gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
+
+ return page_pool_alloc_netmems(pool, gfp);
+}
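A hedged sketch of an RX refill loop built on the new GFP_ATOMIC wrapper above; posting the buffer to hardware is left as a comment since that part is driver-specific.

#include <net/page_pool/helpers.h>

static int example_refill_rx(struct page_pool *pool, int budget)
{
	netmem_ref netmem;
	int filled = 0;

	while (filled < budget) {
		netmem = page_pool_dev_alloc_netmems(pool);
		if (!netmem)
			break;
		/* post netmem to the RX ring here (driver-specific) */
		filled++;
	}

	return filled;
}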
+
static inline struct page *page_pool_alloc(struct page_pool *pool,
unsigned int *offset,
unsigned int *size, gfp_t gfp)
@@ -437,12 +444,7 @@ static inline dma_addr_t page_pool_get_dma_addr_netmem(netmem_ref netmem)
*/
static inline dma_addr_t page_pool_get_dma_addr(const struct page *page)
{
- dma_addr_t ret = page->dma_addr;
-
- if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA)
- ret <<= PAGE_SHIFT;
-
- return ret;
+ return page_pool_get_dma_addr_netmem(page_to_netmem(page));
}
static inline void __page_pool_dma_sync_for_cpu(const struct page_pool *pool,
diff --git a/include/net/pfcp.h b/include/net/pfcp.h
index af14f970b80e..639553797d3e 100644
--- a/include/net/pfcp.h
+++ b/include/net/pfcp.h
@@ -45,7 +45,7 @@ struct pfcphdr_session {
reserved:4;
#elif defined(__BIG_ENDIAN_BITFIELD)
u8 reserved:4,
- message_priprity:4;
+ message_priority:4;
#else
#error "Please fix <asm/byteorder>"
#endif
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index b07b1cd14e9f..6a5ec1418e85 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -30,8 +30,6 @@ struct request_sock_ops {
unsigned int obj_size;
struct kmem_cache *slab;
char *slab_name;
- int (*rtx_syn_ack)(const struct sock *sk,
- struct request_sock *req);
void (*send_ack)(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req);
void (*send_reset)(const struct sock *sk,
@@ -41,8 +39,6 @@ struct request_sock_ops {
void (*syn_ack_timeout)(const struct request_sock *req);
};
-int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);
-
struct saved_syn {
u32 mac_hdrlen;
u32 network_hdrlen;
diff --git a/include/net/route.h b/include/net/route.h
index 8e39aa822cf9..7ea840daa775 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -153,7 +153,7 @@ static inline void inet_sk_init_flowi4(const struct inet_sock *inet,
ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),
sk->sk_protocol, inet_sk_flowi_flags(sk), daddr,
inet->inet_saddr, inet->inet_dport,
- inet->inet_sport, sk->sk_uid);
+ inet->inet_sport, sk_uid(sk));
security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
}
@@ -331,7 +331,7 @@ static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst,
flowi4_init_output(fl4, oif, READ_ONCE(sk->sk_mark), ip_sock_rt_tos(sk),
ip_sock_rt_scope(sk), protocol, flow_flags, dst,
- src, dport, sport, sk->sk_uid);
+ src, dport, sport, sk_uid(sk));
}
static inline struct rtable *ip_route_connect(struct flowi4 *fl4, __be32 dst,
@@ -390,7 +390,7 @@ static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
const struct net *net;
rcu_read_lock();
- net = dev_net_rcu(dst->dev);
+ net = dev_net_rcu(dst_dev(dst));
hoplimit = READ_ONCE(net->ipv4.sysctl_ip_default_ttl);
rcu_read_unlock();
}
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 1ad7ce71d0a7..8a540ad9b509 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -51,9 +51,9 @@
* We should wean ourselves off this.
*/
union sctp_addr {
+ struct sockaddr_inet sa; /* Large enough for both address families */
struct sockaddr_in v4;
struct sockaddr_in6 v6;
- struct sockaddr sa;
};
/* Forward declarations for data structures. */
diff --git a/include/net/sock.h b/include/net/sock.h
index 131f3662426f..c8a4b283df6f 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1553,7 +1553,7 @@ __sk_rmem_schedule(struct sock *sk, int size, bool pfmemalloc)
}
static inline bool
-sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
+sk_rmem_schedule(struct sock *sk, const struct sk_buff *skb, int size)
{
return __sk_rmem_schedule(sk, size, skb_pfmemalloc(skb));
}
@@ -2076,6 +2076,7 @@ static inline void sock_orphan(struct sock *sk)
sock_set_flag(sk, SOCK_DEAD);
sk_set_socket(sk, NULL);
sk->sk_wq = NULL;
+ /* Note: sk_uid is unchanged. */
write_unlock_bh(&sk->sk_callback_lock);
}
@@ -2086,18 +2087,23 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
rcu_assign_pointer(sk->sk_wq, &parent->wq);
parent->sk = sk;
sk_set_socket(sk, parent);
- sk->sk_uid = SOCK_INODE(parent)->i_uid;
+ WRITE_ONCE(sk->sk_uid, SOCK_INODE(parent)->i_uid);
security_sock_graft(sk, parent);
write_unlock_bh(&sk->sk_callback_lock);
}
-kuid_t sock_i_uid(struct sock *sk);
+static inline kuid_t sk_uid(const struct sock *sk)
+{
+ /* Paired with WRITE_ONCE() in sockfs_setattr() */
+ return READ_ONCE(sk->sk_uid);
+}
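A hedged sketch of the intended usage: lockless readers go through sk_uid() (a READ_ONCE), while writers such as sock_graft() above use WRITE_ONCE() on sk->sk_uid.

#include <net/sock.h>

static bool example_sock_owned_by(const struct sock *sk, kuid_t uid)
{
	return uid_eq(sk_uid(sk), uid);	/* safe without sk_callback_lock */
}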
+
unsigned long __sock_i_ino(struct sock *sk);
unsigned long sock_i_ino(struct sock *sk);
static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
{
- return sk ? sk->sk_uid : make_kuid(net->user_ns, 0);
+ return sk ? sk_uid(sk) : make_kuid(net->user_ns, 0);
}
static inline u32 net_tx_rndhash(void)
@@ -2590,12 +2596,12 @@ static inline gfp_t gfp_memcg_charge(void)
static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
{
- return noblock ? 0 : sk->sk_rcvtimeo;
+ return noblock ? 0 : READ_ONCE(sk->sk_rcvtimeo);
}
static inline long sock_sndtimeo(const struct sock *sk, bool noblock)
{
- return noblock ? 0 : sk->sk_sndtimeo;
+ return noblock ? 0 : READ_ONCE(sk->sk_sndtimeo);
}
static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
@@ -2986,7 +2992,6 @@ void sock_set_timestamp(struct sock *sk, int optname, bool valbool);
int sock_set_timestamping(struct sock *sk, int optname,
struct so_timestamping timestamping);
-void sock_enable_timestamps(struct sock *sk);
#if defined(CONFIG_CGROUP_BPF)
void bpf_skops_tx_timestamping(struct sock *sk, struct sk_buff *skb, int op);
#else
diff --git a/include/net/tc_act/tc_connmark.h b/include/net/tc_act/tc_connmark.h
index e8dd77a96748..a5ce83f3eea4 100644
--- a/include/net/tc_act/tc_connmark.h
+++ b/include/net/tc_act/tc_connmark.h
@@ -7,6 +7,7 @@
struct tcf_connmark_parms {
struct net *net;
u16 zone;
+ int action;
struct rcu_head rcu;
};
diff --git a/include/net/tc_act/tc_csum.h b/include/net/tc_act/tc_csum.h
index 68269e4581b7..8d0c7a9f9345 100644
--- a/include/net/tc_act/tc_csum.h
+++ b/include/net/tc_act/tc_csum.h
@@ -8,6 +8,7 @@
struct tcf_csum_params {
u32 update_flags;
+ int action;
struct rcu_head rcu;
};
@@ -18,15 +19,6 @@ struct tcf_csum {
};
#define to_tcf_csum(a) ((struct tcf_csum *)a)
-static inline bool is_tcf_csum(const struct tc_action *a)
-{
-#ifdef CONFIG_NET_CLS_ACT
- if (a->ops && a->ops->id == TCA_ID_CSUM)
- return true;
-#endif
- return false;
-}
-
static inline u32 tcf_csum_update_flags(const struct tc_action *a)
{
u32 update_flags;
diff --git a/include/net/tc_act/tc_ct.h b/include/net/tc_act/tc_ct.h
index 77f87c622a2e..8b90c86c0b0d 100644
--- a/include/net/tc_act/tc_ct.h
+++ b/include/net/tc_act/tc_ct.h
@@ -13,7 +13,7 @@ struct tcf_ct_params {
struct nf_conntrack_helper *helper;
struct nf_conn *tmpl;
u16 zone;
-
+ int action;
u32 mark;
u32 mark_mask;
@@ -92,13 +92,4 @@ static inline void
tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie) { }
#endif
-static inline bool is_tcf_ct(const struct tc_action *a)
-{
-#if defined(CONFIG_NET_CLS_ACT) && IS_ENABLED(CONFIG_NF_CONNTRACK)
- if (a->ops && a->ops->id == TCA_ID_CT)
- return true;
-#endif
- return false;
-}
-
#endif /* __NET_TC_CT_H */
diff --git a/include/net/tc_act/tc_ctinfo.h b/include/net/tc_act/tc_ctinfo.h
index f071c1d70a25..7fe01ab236da 100644
--- a/include/net/tc_act/tc_ctinfo.h
+++ b/include/net/tc_act/tc_ctinfo.h
@@ -7,6 +7,7 @@
struct tcf_ctinfo_params {
struct rcu_head rcu;
struct net *net;
+ int action;
u32 dscpmask;
u32 dscpstatemask;
u32 cpmarkmask;
@@ -18,9 +19,9 @@ struct tcf_ctinfo_params {
struct tcf_ctinfo {
struct tc_action common;
struct tcf_ctinfo_params __rcu *params;
- u64 stats_dscp_set;
- u64 stats_dscp_error;
- u64 stats_cpmark_set;
+ atomic64_t stats_dscp_set;
+ atomic64_t stats_dscp_error;
+ atomic64_t stats_cpmark_set;
};
enum {
diff --git a/include/net/tc_act/tc_gate.h b/include/net/tc_act/tc_gate.h
index c8fa11ebb397..c1a67149c6b6 100644
--- a/include/net/tc_act/tc_gate.h
+++ b/include/net/tc_act/tc_gate.h
@@ -51,15 +51,6 @@ struct tcf_gate {
#define to_gate(a) ((struct tcf_gate *)a)
-static inline bool is_tcf_gate(const struct tc_action *a)
-{
-#ifdef CONFIG_NET_CLS_ACT
- if (a->ops && a->ops->id == TCA_ID_GATE)
- return true;
-#endif
- return false;
-}
-
static inline s32 tcf_gate_prio(const struct tc_action *a)
{
s32 tcfg_prio;
diff --git a/include/net/tc_act/tc_mpls.h b/include/net/tc_act/tc_mpls.h
index 721de4f5733a..dd067bd4018d 100644
--- a/include/net/tc_act/tc_mpls.h
+++ b/include/net/tc_act/tc_mpls.h
@@ -10,6 +10,7 @@
struct tcf_mpls_params {
int tcfm_action;
u32 tcfm_label;
+ int action; /* tcf_action */
u8 tcfm_tc;
u8 tcfm_ttl;
u8 tcfm_bos;
@@ -27,15 +28,6 @@ struct tcf_mpls {
};
#define to_mpls(a) ((struct tcf_mpls *)a)
-static inline bool is_tcf_mpls(const struct tc_action *a)
-{
-#ifdef CONFIG_NET_CLS_ACT
- if (a->ops && a->ops->id == TCA_ID_MPLS)
- return true;
-#endif
- return false;
-}
-
static inline u32 tcf_mpls_action(const struct tc_action *a)
{
u32 tcfm_action;
diff --git a/include/net/tc_act/tc_nat.h b/include/net/tc_act/tc_nat.h
index c869274ac529..ae35f4009445 100644
--- a/include/net/tc_act/tc_nat.h
+++ b/include/net/tc_act/tc_nat.h
@@ -6,6 +6,7 @@
#include <net/act_api.h>
struct tcf_nat_parms {
+ int action;
__be32 old_addr;
__be32 new_addr;
__be32 mask;
diff --git a/include/net/tc_act/tc_pedit.h b/include/net/tc_act/tc_pedit.h
index 83fe39931781..f58ee15cd858 100644
--- a/include/net/tc_act/tc_pedit.h
+++ b/include/net/tc_act/tc_pedit.h
@@ -14,6 +14,7 @@ struct tcf_pedit_key_ex {
struct tcf_pedit_parms {
struct tc_pedit_key *tcfp_keys;
struct tcf_pedit_key_ex *tcfp_keys_ex;
+ int action;
u32 tcfp_off_max_hint;
unsigned char tcfp_nkeys;
unsigned char tcfp_flags;
diff --git a/include/net/tc_act/tc_police.h b/include/net/tc_act/tc_police.h
index 283bde711a42..a89fc8e68b1e 100644
--- a/include/net/tc_act/tc_police.h
+++ b/include/net/tc_act/tc_police.h
@@ -5,10 +5,11 @@
#include <net/act_api.h>
struct tcf_police_params {
+ int action;
int tcfp_result;
u32 tcfp_ewma_rate;
- s64 tcfp_burst;
u32 tcfp_mtu;
+ s64 tcfp_burst;
s64 tcfp_mtu_ptoks;
s64 tcfp_pkt_burst;
struct psched_ratecfg rate;
@@ -44,15 +45,6 @@ struct tc_police_compat {
struct tc_ratespec peakrate;
};
-static inline bool is_tcf_police(const struct tc_action *act)
-{
-#ifdef CONFIG_NET_CLS_ACT
- if (act->ops && act->ops->id == TCA_ID_POLICE)
- return true;
-#endif
- return false;
-}
-
static inline u64 tcf_police_rate_bytes_ps(const struct tc_action *act)
{
struct tcf_police *police = to_police(act);
diff --git a/include/net/tc_act/tc_sample.h b/include/net/tc_act/tc_sample.h
index b5d76305e854..abd163ca1864 100644
--- a/include/net/tc_act/tc_sample.h
+++ b/include/net/tc_act/tc_sample.h
@@ -17,15 +17,6 @@ struct tcf_sample {
};
#define to_sample(a) ((struct tcf_sample *)a)
-static inline bool is_tcf_sample(const struct tc_action *a)
-{
-#ifdef CONFIG_NET_CLS_ACT
- return a->ops && a->ops->id == TCA_ID_SAMPLE;
-#else
- return false;
-#endif
-}
-
static inline __u32 tcf_sample_rate(const struct tc_action *a)
{
return to_sample(a)->rate;
diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h
index 9649600fb3dc..31b2cd0bebb5 100644
--- a/include/net/tc_act/tc_skbedit.h
+++ b/include/net/tc_act/tc_skbedit.h
@@ -12,6 +12,7 @@
#include <linux/tc_act/tc_skbedit.h>
struct tcf_skbedit_params {
+ int action;
u32 flags;
u32 priority;
u32 mark;
diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h
index 904eddfc1826..3f5e9242b5e8 100644
--- a/include/net/tc_act/tc_vlan.h
+++ b/include/net/tc_act/tc_vlan.h
@@ -26,15 +26,6 @@ struct tcf_vlan {
};
#define to_vlan(a) ((struct tcf_vlan *)a)
-static inline bool is_tcf_vlan(const struct tc_action *a)
-{
-#ifdef CONFIG_NET_CLS_ACT
- if (a->ops && a->ops->id == TCA_ID_VLAN)
- return true;
-#endif
- return false;
-}
-
static inline u32 tcf_vlan_action(const struct tc_action *a)
{
u32 tcfv_action;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 5078ad868fee..b3815d104340 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -267,7 +267,6 @@ extern long sysctl_tcp_mem[3];
#define TCP_RACK_STATIC_REO_WND 0x2 /* Use static RACK reo wnd */
#define TCP_RACK_NO_DUPTHRESH 0x4 /* Do not use DUPACK threshold in RACK */
-extern atomic_long_t tcp_memory_allocated;
DECLARE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);
extern struct percpu_counter tcp_sockets_allocated;
@@ -321,7 +320,7 @@ extern struct proto tcp_prot;
#define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
-void tcp_tasklet_init(void);
+void tcp_tsq_work_init(void);
int tcp_v4_err(struct sk_buff *skb, u32);
@@ -1560,7 +1559,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
enum skb_drop_reason *reason);
-int tcp_filter(struct sock *sk, struct sk_buff *skb);
+int tcp_filter(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason *reason);
void tcp_set_state(struct sock *sk, int state);
void tcp_done(struct sock *sk);
int tcp_abort(struct sock *sk, int err);
@@ -1811,14 +1810,8 @@ static inline void tcp_mib_init(struct net *net)
}
/* from STCP */
-static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
-{
- tp->lost_skb_hint = NULL;
-}
-
static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
- tcp_clear_retrans_hints_partial(tp);
tp->retransmit_skb_hint = NULL;
}
diff --git a/include/net/tcx.h b/include/net/tcx.h
index 5ce0ce9e0c02..23a61af13547 100644
--- a/include/net/tcx.h
+++ b/include/net/tcx.h
@@ -20,7 +20,6 @@ struct tcx_entry {
struct tcx_link {
struct bpf_link link;
struct net_device *dev;
- u32 location;
};
static inline void tcx_set_ingress(struct sk_buff *skb, bool ingress)
diff --git a/include/net/udp.h b/include/net/udp.h
index a772510b2aa5..f8ae2c4ade14 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -205,7 +205,6 @@ static inline void udp_hash4_dec(struct udp_hslot *hslot2)
extern struct proto udp_prot;
-extern atomic_long_t udp_memory_allocated;
DECLARE_PER_CPU(int, udp_memory_per_cpu_fw_alloc);
/* sysctl variables for udp */
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index 2df3b8344eb5..9acef2fbd2fd 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -130,35 +130,20 @@ void udp_tunnel_drop_rx_port(struct net_device *dev, struct socket *sock,
void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type);
void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type);
-static inline void udp_tunnel_get_rx_info(struct net_device *dev)
-{
- ASSERT_RTNL();
- if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
- return;
- call_netdevice_notifiers(NETDEV_UDP_TUNNEL_PUSH_INFO, dev);
-}
-
-static inline void udp_tunnel_drop_rx_info(struct net_device *dev)
-{
- ASSERT_RTNL();
- if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
- return;
- call_netdevice_notifiers(NETDEV_UDP_TUNNEL_DROP_INFO, dev);
-}
-
/* Transmit the skb using UDP encapsulation. */
void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 tos, __u8 ttl,
__be16 df, __be16 src_port, __be16 dst_port,
- bool xnet, bool nocheck);
+ bool xnet, bool nocheck, u16 ipcb_flags);
-int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb,
- struct net_device *dev,
- const struct in6_addr *saddr,
- const struct in6_addr *daddr,
- __u8 prio, __u8 ttl, __be32 label,
- __be16 src_port, __be16 dst_port, bool nocheck);
+void udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb,
+ struct net_device *dev,
+ const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u8 prio, __u8 ttl, __be32 label,
+ __be16 src_port, __be16 dst_port, bool nocheck,
+ u16 ip6cb_flags);
void udp_tunnel_sock_release(struct socket *sock);
@@ -221,19 +206,17 @@ static inline void udp_tunnel_encap_enable(struct sock *sk)
#define UDP_TUNNEL_NIC_MAX_TABLES 4
enum udp_tunnel_nic_info_flags {
- /* Device callbacks may sleep */
- UDP_TUNNEL_NIC_INFO_MAY_SLEEP = BIT(0),
/* Device only supports offloads when it's open, all ports
* will be removed before close and re-added after open.
*/
- UDP_TUNNEL_NIC_INFO_OPEN_ONLY = BIT(1),
+ UDP_TUNNEL_NIC_INFO_OPEN_ONLY = BIT(0),
/* Device supports only IPv4 tunnels */
- UDP_TUNNEL_NIC_INFO_IPV4_ONLY = BIT(2),
+ UDP_TUNNEL_NIC_INFO_IPV4_ONLY = BIT(1),
/* Device has hard-coded the IANA VXLAN port (4789) as VXLAN.
* This port must not be counted towards n_entries of any table.
* Driver will not receive any callback associated with port 4789.
*/
- UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN = BIT(3),
+ UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN = BIT(2),
};
struct udp_tunnel_nic;
@@ -324,6 +307,9 @@ struct udp_tunnel_nic_ops {
size_t (*dump_size)(struct net_device *dev, unsigned int table);
int (*dump_write)(struct net_device *dev, unsigned int table,
struct sk_buff *skb);
+ void (*assert_locked)(struct net_device *dev);
+ void (*lock)(struct net_device *dev);
+ void (*unlock)(struct net_device *dev);
};
#ifdef CONFIG_INET
@@ -352,8 +338,28 @@ static inline void
udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table,
unsigned int idx, u8 priv)
{
- if (udp_tunnel_nic_ops)
+ if (udp_tunnel_nic_ops) {
+ udp_tunnel_nic_ops->assert_locked(dev);
udp_tunnel_nic_ops->set_port_priv(dev, table, idx, priv);
+ }
+}
+
+static inline void udp_tunnel_nic_assert_locked(struct net_device *dev)
+{
+ if (udp_tunnel_nic_ops)
+ udp_tunnel_nic_ops->assert_locked(dev);
+}
+
+static inline void udp_tunnel_nic_lock(struct net_device *dev)
+{
+ if (udp_tunnel_nic_ops)
+ udp_tunnel_nic_ops->lock(dev);
+}
+
+static inline void udp_tunnel_nic_unlock(struct net_device *dev)
+{
+ if (udp_tunnel_nic_ops)
+ udp_tunnel_nic_ops->unlock(dev);
}
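A hedged sketch of the locking pattern these wrappers imply: udp_tunnel_get_rx_info()/udp_tunnel_drop_rx_info() (moved further down in this file) now assert the udp_tunnel_nic lock, so a caller that only holds RTNL is assumed to bracket them like this.

#include <net/udp_tunnel.h>

static void example_replay_tunnel_ports(struct net_device *dev)
{
	udp_tunnel_nic_lock(dev);
	udp_tunnel_get_rx_info(dev);	/* asserts the lock internally */
	udp_tunnel_nic_unlock(dev);
}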
static inline void
@@ -395,17 +401,50 @@ static inline void udp_tunnel_nic_reset_ntf(struct net_device *dev)
static inline size_t
udp_tunnel_nic_dump_size(struct net_device *dev, unsigned int table)
{
+ size_t ret;
+
if (!udp_tunnel_nic_ops)
return 0;
- return udp_tunnel_nic_ops->dump_size(dev, table);
+
+ udp_tunnel_nic_ops->lock(dev);
+ ret = udp_tunnel_nic_ops->dump_size(dev, table);
+ udp_tunnel_nic_ops->unlock(dev);
+
+ return ret;
}
static inline int
udp_tunnel_nic_dump_write(struct net_device *dev, unsigned int table,
struct sk_buff *skb)
{
+ int ret;
+
if (!udp_tunnel_nic_ops)
return 0;
- return udp_tunnel_nic_ops->dump_write(dev, table, skb);
+
+ udp_tunnel_nic_ops->lock(dev);
+ ret = udp_tunnel_nic_ops->dump_write(dev, table, skb);
+ udp_tunnel_nic_ops->unlock(dev);
+
+ return ret;
+}
+
+static inline void udp_tunnel_get_rx_info(struct net_device *dev)
+{
+ ASSERT_RTNL();
+ if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
+ return;
+ udp_tunnel_nic_assert_locked(dev);
+ call_netdevice_notifiers(NETDEV_UDP_TUNNEL_PUSH_INFO, dev);
}
+
+static inline void udp_tunnel_drop_rx_info(struct net_device *dev)
+{
+ ASSERT_RTNL();
+ if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
+ return;
+ udp_tunnel_nic_assert_locked(dev);
+ call_netdevice_notifiers(NETDEV_UDP_TUNNEL_DROP_INFO, dev);
+}
+
#endif
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index e2f7ca045d3e..0ee50785f4f1 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -332,6 +332,7 @@ struct vxlan_dev {
#define VXLAN_F_VNIFILTER 0x20000
#define VXLAN_F_MDB 0x40000
#define VXLAN_F_LOCALBYPASS 0x80000
+#define VXLAN_F_MC_ROUTE 0x100000
/* Flags that are used in the receive path. These flags must match in
* order for a socket to be shareable
@@ -353,7 +354,9 @@ struct vxlan_dev {
VXLAN_F_UDP_ZERO_CSUM6_RX | \
VXLAN_F_COLLECT_METADATA | \
VXLAN_F_VNIFILTER | \
- VXLAN_F_LOCALBYPASS)
+ VXLAN_F_LOCALBYPASS | \
+ VXLAN_F_MC_ROUTE | \
+ 0)
struct net_device *vxlan_dev_create(struct net *net, const char *name,
u8 name_assign_type, struct vxlan_config *conf);
diff --git a/include/net/x25.h b/include/net/x25.h
index 5e833cfc864e..414f3fd99345 100644
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -203,7 +203,6 @@ void x25_send_frame(struct sk_buff *, struct x25_neigh *);
int x25_lapb_receive_frame(struct sk_buff *, struct net_device *,
struct packet_type *, struct net_device *);
void x25_establish_link(struct x25_neigh *);
-void x25_terminate_link(struct x25_neigh *);
/* x25_facilities.c */
int x25_parse_facilities(struct sk_buff *, struct x25_facilities *,
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index e8bd6ddb7b12..ce587a225661 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -84,6 +84,7 @@ struct xdp_sock {
struct list_head map_list;
/* Protects map_list */
spinlock_t map_list_lock;
+ u32 max_tx_budget;
/* Protects multiple processes in the control path */
struct mutex mutex;
struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */
diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h
index 14c9f943d53f..c8cd0f00c845 100644
--- a/include/ras/ras_event.h
+++ b/include/ras/ras_event.h
@@ -252,6 +252,7 @@ TRACE_EVENT(non_standard_event,
__print_hex(__get_dynamic_array(buf), __entry->len))
);
+#ifdef CONFIG_PCIEAER
/*
* PCIe AER Trace event
*
@@ -337,6 +338,7 @@ TRACE_EVENT(aer_event,
__print_array(__entry->tlp_header, PCIE_STD_MAX_TLP_HEADERLOG, 4) :
"Not available")
);
+#endif /* CONFIG_PCIEAER */
/*
* memory-failure recovery action result event
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 7dc7b1cc71b5..0a8e092c0ea8 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -52,11 +52,15 @@ static inline int ib_umem_offset(struct ib_umem *umem)
return umem->address & ~PAGE_MASK;
}
+static inline dma_addr_t ib_umem_start_dma_addr(struct ib_umem *umem)
+{
+ return sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem);
+}
+
static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
unsigned long pgsz)
{
- return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) &
- (pgsz - 1);
+ return ib_umem_start_dma_addr(umem) & (pgsz - 1);
}
static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
@@ -135,14 +139,27 @@ static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
unsigned long pgsz_bitmap,
u64 pgoff_bitmask)
{
- struct scatterlist *sg = umem->sgt_append.sgt.sgl;
dma_addr_t dma_addr;
- dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
+ dma_addr = ib_umem_start_dma_addr(umem);
return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
dma_addr & pgoff_bitmask);
}
+static inline bool ib_umem_is_contiguous(struct ib_umem *umem)
+{
+ dma_addr_t dma_addr;
+ unsigned long pgsz;
+
+ /*
+ * Select the smallest aligned page that can contain the whole umem if
+ * it was contiguous.
+ */
+ dma_addr = ib_umem_start_dma_addr(umem);
+ pgsz = roundup_pow_of_two((dma_addr ^ (umem->length - 1 + dma_addr)) + 1);
+ return !!ib_umem_find_best_pgoff(umem, pgsz, U64_MAX);
+}
+
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
unsigned long offset, size_t size,
int fd, int access,
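
[Editor's note] The new ib_umem_is_contiguous() helper computes the smallest power-of-two block that could cover the whole mapping and then asks ib_umem_find_best_pgoff() whether the umem really fits in one DMA block of that size. A worked example with made-up numbers:

        dma_addr                      = 0x201000
        last byte                     = dma_addr + length - 1 = 0x203fff   (length = 0x3000)
        dma_addr ^ last byte          = 0x002fff
        roundup_pow_of_two(... + 1)   = 0x004000   (16 KiB candidate block)
        contiguous iff ib_umem_find_best_pgoff(umem, 0x4000, U64_MAX) != 0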
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index af43a8d2a74a..6139223e92e4 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -42,6 +42,7 @@
#include <rdma/signature.h>
#include <uapi/rdma/rdma_user_ioctl.h>
#include <uapi/rdma/ib_user_ioctl_verbs.h>
+#include <linux/pci-tph.h>
#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
@@ -1846,6 +1847,27 @@ struct ib_dm {
atomic_t usecnt;
};
+/* bit values to mark existence of ib_dmah fields */
+enum {
+ IB_DMAH_CPU_ID_EXISTS,
+ IB_DMAH_MEM_TYPE_EXISTS,
+ IB_DMAH_PH_EXISTS,
+};
+
+struct ib_dmah {
+ struct ib_device *device;
+ struct ib_uobject *uobject;
+ /*
+ * Implementation details of the RDMA core, don't use in drivers:
+ */
+ struct rdma_restrack_entry res;
+ u32 cpu_id;
+ enum tph_mem_type mem_type;
+ atomic_t usecnt;
+ u8 ph;
+ u8 valid_fields; /* use IB_DMAH_XXX_EXISTS */
+};
+
struct ib_mr {
struct ib_device *device;
struct ib_pd *pd;
@@ -1863,6 +1885,7 @@ struct ib_mr {
struct ib_dm *dm;
struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
+ struct ib_dmah *dmah;
/*
* Implementation details of the RDMA core, don't use in drivers:
*/
@@ -2486,16 +2509,31 @@ struct ib_device_ops {
int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
struct uverbs_attr_bundle *attrs);
+ int (*create_cq_umem)(struct ib_cq *cq,
+ const struct ib_cq_init_attr *attr,
+ struct ib_umem *umem,
+ struct uverbs_attr_bundle *attrs);
int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
+ /**
+ * pre_destroy_cq - Prevent a cq from generating any new work
+ * completions, but not free any kernel resources
+ */
+ int (*pre_destroy_cq)(struct ib_cq *cq);
+ /**
+ * post_destroy_cq - Free all kernel resources
+ */
+ void (*post_destroy_cq)(struct ib_cq *cq);
struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata);
struct ib_mr *(*reg_user_mr_dmabuf)(struct ib_pd *pd, u64 offset,
u64 length, u64 virt_addr, int fd,
int mr_access_flags,
+ struct ib_dmah *dmah,
struct uverbs_attr_bundle *attrs);
struct ib_mr *(*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start,
u64 length, u64 virt_addr,
@@ -2560,6 +2598,9 @@ struct ib_device_ops {
struct ib_dm_alloc_attr *attr,
struct uverbs_attr_bundle *attrs);
int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
+ int (*alloc_dmah)(struct ib_dmah *ibdmah,
+ struct uverbs_attr_bundle *attrs);
+ int (*dealloc_dmah)(struct ib_dmah *dmah, struct uverbs_attr_bundle *attrs);
struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
struct ib_dm_mr_attr *attr,
struct uverbs_attr_bundle *attrs);
@@ -2717,6 +2758,7 @@ struct ib_device_ops {
DECLARE_RDMA_OBJ_SIZE(ib_ah);
DECLARE_RDMA_OBJ_SIZE(ib_counters);
DECLARE_RDMA_OBJ_SIZE(ib_cq);
+ DECLARE_RDMA_OBJ_SIZE(ib_dmah);
DECLARE_RDMA_OBJ_SIZE(ib_mw);
DECLARE_RDMA_OBJ_SIZE(ib_pd);
DECLARE_RDMA_OBJ_SIZE(ib_qp);
@@ -2905,11 +2947,18 @@ struct ib_block_iter {
unsigned int __pg_bit; /* alignment of current block */
};
-struct ib_device *_ib_alloc_device(size_t size);
+struct ib_device *_ib_alloc_device(size_t size, struct net *net);
#define ib_alloc_device(drv_struct, member) \
container_of(_ib_alloc_device(sizeof(struct drv_struct) + \
BUILD_BUG_ON_ZERO(offsetof( \
- struct drv_struct, member))), \
+ struct drv_struct, member)), \
+ &init_net), \
+ struct drv_struct, member)
+
+#define ib_alloc_device_with_net(drv_struct, member, net) \
+ container_of(_ib_alloc_device(sizeof(struct drv_struct) + \
+ BUILD_BUG_ON_ZERO(offsetof( \
+ struct drv_struct, member)), net), \
struct drv_struct, member)
void ib_dealloc_device(struct ib_device *device);
@@ -4794,11 +4843,17 @@ struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
+bool rdma_uattrs_has_raw_cap(const struct uverbs_attr_bundle *attrs);
#else
static inline int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs)
{
return 0;
}
+static inline bool
+rdma_uattrs_has_raw_cap(const struct uverbs_attr_bundle *attrs)
+{
+ return false;
+}
#endif
struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
@@ -4855,6 +4910,12 @@ static inline int ibdev_to_node(struct ib_device *ibdev)
bool rdma_dev_access_netns(const struct ib_device *device,
const struct net *net);
+bool rdma_dev_has_raw_cap(const struct ib_device *dev);
+static inline struct net *rdma_dev_net(struct ib_device *device)
+{
+ return read_pnet(&device->coredev.rdma_net);
+}
+
#define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
#define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF)
#define IB_GRH_FLOWLABEL_MASK (0x000FFFFF)
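
[Editor's sketch] The pre_destroy_cq()/post_destroy_cq() pair splits CQ teardown so the device can stop generating completions before kernel resources are released. A hedged sketch of how a caller could drive the split; the real sequencing lives in drivers/infiniband/core and may differ, for example in how it waits out in-flight completion handlers:

static int example_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
        const struct ib_device_ops *ops = &cq->device->ops;
        int ret;

        if (ops->pre_destroy_cq && ops->post_destroy_cq) {
                /* 1) fence the hardware: no new work completions after this */
                ret = ops->pre_destroy_cq(cq);
                if (ret)
                        return ret;
                /* 2) ... quiesce/flush any completion handlers still running ... */
                /* 3) only now free the kernel-side resources */
                ops->post_destroy_cq(cq);
                return 0;
        }
        return ops->destroy_cq(cq, udata);
}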
diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
index 0d69ded73bf2..8a9bcf77dace 100644
--- a/include/rdma/restrack.h
+++ b/include/rdma/restrack.h
@@ -57,6 +57,10 @@ enum rdma_restrack_type {
*/
RDMA_RESTRACK_SRQ,
/**
+ * @RDMA_RESTRACK_DMAH: DMA handle
+ */
+ RDMA_RESTRACK_DMAH,
+ /**
	 * @RDMA_RESTRACK_MAX: Last entry, used for array declarations
*/
RDMA_RESTRACK_MAX
diff --git a/include/rv/da_monitor.h b/include/rv/da_monitor.h
index 510c88bfabd4..17fa4f6e5ea6 100644
--- a/include/rv/da_monitor.h
+++ b/include/rv/da_monitor.h
@@ -19,45 +19,22 @@
#ifdef CONFIG_RV_REACTORS
#define DECLARE_RV_REACTING_HELPERS(name, type) \
-static char REACT_MSG_##name[1024]; \
- \
-static inline char *format_react_msg_##name(type curr_state, type event) \
-{ \
- snprintf(REACT_MSG_##name, 1024, \
- "rv: monitor %s does not allow event %s on state %s\n", \
- #name, \
- model_get_event_name_##name(event), \
- model_get_state_name_##name(curr_state)); \
- return REACT_MSG_##name; \
-} \
- \
-static void cond_react_##name(char *msg) \
+static void cond_react_##name(type curr_state, type event) \
{ \
- if (rv_##name.react) \
- rv_##name.react(msg); \
-} \
- \
-static bool rv_reacting_on_##name(void) \
-{ \
- return rv_reacting_on(); \
+ if (!rv_reacting_on() || !rv_##name.react) \
+ return; \
+ rv_##name.react("rv: monitor %s does not allow event %s on state %s\n", \
+ #name, \
+ model_get_event_name_##name(event), \
+ model_get_state_name_##name(curr_state)); \
}
#else /* CONFIG_RV_REACTOR */
#define DECLARE_RV_REACTING_HELPERS(name, type) \
-static inline char *format_react_msg_##name(type curr_state, type event) \
-{ \
- return NULL; \
-} \
- \
-static void cond_react_##name(char *msg) \
+static void cond_react_##name(type curr_state, type event) \
{ \
return; \
-} \
- \
-static bool rv_reacting_on_##name(void) \
-{ \
- return 0; \
}
#endif
@@ -78,23 +55,6 @@ static inline void da_monitor_reset_##name(struct da_monitor *da_mon) \
} \
\
/* \
- * da_monitor_curr_state_##name - return the current state \
- */ \
-static inline type da_monitor_curr_state_##name(struct da_monitor *da_mon) \
-{ \
- return da_mon->curr_state; \
-} \
- \
-/* \
- * da_monitor_set_state_##name - set the new current state \
- */ \
-static inline void \
-da_monitor_set_state_##name(struct da_monitor *da_mon, enum states_##name state) \
-{ \
- da_mon->curr_state = state; \
-} \
- \
-/* \
* da_monitor_start_##name - start monitoring \
* \
* The monitor will ignore all events until monitoring is set to true. This \
@@ -150,65 +110,81 @@ static inline bool da_monitor_handling_event_##name(struct da_monitor *da_mon)
* Event handler for implicit monitors. Implicit monitor is the one which the
* handler does not need to specify which da_monitor to manipulate. Examples
* of implicit monitor are the per_cpu or the global ones.
+ *
+ * Retry in case there is a race between getting and setting the next state,
+ * warn and reset the monitor if it runs out of retries. The monitor should be
+ * able to handle various orders.
*/
#define DECLARE_DA_MON_MODEL_HANDLER_IMPLICIT(name, type) \
\
static inline bool \
da_event_##name(struct da_monitor *da_mon, enum events_##name event) \
{ \
- type curr_state = da_monitor_curr_state_##name(da_mon); \
- type next_state = model_get_next_state_##name(curr_state, event); \
- \
- if (next_state != INVALID_STATE) { \
- da_monitor_set_state_##name(da_mon, next_state); \
- \
- trace_event_##name(model_get_state_name_##name(curr_state), \
- model_get_event_name_##name(event), \
- model_get_state_name_##name(next_state), \
- model_is_final_state_##name(next_state)); \
- \
- return true; \
+ enum states_##name curr_state, next_state; \
+ \
+ curr_state = READ_ONCE(da_mon->curr_state); \
+ for (int i = 0; i < MAX_DA_RETRY_RACING_EVENTS; i++) { \
+ next_state = model_get_next_state_##name(curr_state, event); \
+ if (next_state == INVALID_STATE) { \
+ cond_react_##name(curr_state, event); \
+ trace_error_##name(model_get_state_name_##name(curr_state), \
+ model_get_event_name_##name(event)); \
+ return false; \
+ } \
+ if (likely(try_cmpxchg(&da_mon->curr_state, &curr_state, next_state))) { \
+ trace_event_##name(model_get_state_name_##name(curr_state), \
+ model_get_event_name_##name(event), \
+ model_get_state_name_##name(next_state), \
+ model_is_final_state_##name(next_state)); \
+ return true; \
+ } \
} \
\
- if (rv_reacting_on_##name()) \
- cond_react_##name(format_react_msg_##name(curr_state, event)); \
- \
- trace_error_##name(model_get_state_name_##name(curr_state), \
- model_get_event_name_##name(event)); \
- \
+ trace_rv_retries_error(#name, model_get_event_name_##name(event)); \
+ pr_warn("rv: " __stringify(MAX_DA_RETRY_RACING_EVENTS) \
+ " retries reached for event %s, resetting monitor %s", \
+ model_get_event_name_##name(event), #name); \
return false; \
} \
/*
* Event handler for per_task monitors.
+ *
+ * Retry in case there is a race between getting and setting the next state,
+ * warn and reset the monitor if it runs out of retries. The monitor should be
+ * able to handle various orders.
*/
#define DECLARE_DA_MON_MODEL_HANDLER_PER_TASK(name, type) \
\
static inline bool da_event_##name(struct da_monitor *da_mon, struct task_struct *tsk, \
enum events_##name event) \
{ \
- type curr_state = da_monitor_curr_state_##name(da_mon); \
- type next_state = model_get_next_state_##name(curr_state, event); \
- \
- if (next_state != INVALID_STATE) { \
- da_monitor_set_state_##name(da_mon, next_state); \
- \
- trace_event_##name(tsk->pid, \
- model_get_state_name_##name(curr_state), \
- model_get_event_name_##name(event), \
- model_get_state_name_##name(next_state), \
- model_is_final_state_##name(next_state)); \
- \
- return true; \
+ enum states_##name curr_state, next_state; \
+ \
+ curr_state = READ_ONCE(da_mon->curr_state); \
+ for (int i = 0; i < MAX_DA_RETRY_RACING_EVENTS; i++) { \
+ next_state = model_get_next_state_##name(curr_state, event); \
+ if (next_state == INVALID_STATE) { \
+ cond_react_##name(curr_state, event); \
+ trace_error_##name(tsk->pid, \
+ model_get_state_name_##name(curr_state), \
+ model_get_event_name_##name(event)); \
+ return false; \
+ } \
+ if (likely(try_cmpxchg(&da_mon->curr_state, &curr_state, next_state))) { \
+ trace_event_##name(tsk->pid, \
+ model_get_state_name_##name(curr_state), \
+ model_get_event_name_##name(event), \
+ model_get_state_name_##name(next_state), \
+ model_is_final_state_##name(next_state)); \
+ return true; \
+ } \
} \
\
- if (rv_reacting_on_##name()) \
- cond_react_##name(format_react_msg_##name(curr_state, event)); \
- \
- trace_error_##name(tsk->pid, \
- model_get_state_name_##name(curr_state), \
- model_get_event_name_##name(event)); \
- \
+ trace_rv_retries_error(#name, model_get_event_name_##name(event)); \
+ pr_warn("rv: " __stringify(MAX_DA_RETRY_RACING_EVENTS) \
+ " retries reached for event %s, resetting monitor %s", \
+ model_get_event_name_##name(event), #name); \
return false; \
}
@@ -512,6 +488,30 @@ da_handle_start_event_##name(struct task_struct *tsk, enum events_##name event)
__da_handle_event_##name(da_mon, tsk, event); \
\
return 1; \
+} \
+ \
+/* \
+ * da_handle_start_run_event_##name - start monitoring and handle event \
+ * \
+ * This function is used to notify the monitor that the system is in the \
+ * initial state, so the monitor can start monitoring and handling event. \
+ */ \
+static inline bool \
+da_handle_start_run_event_##name(struct task_struct *tsk, enum events_##name event) \
+{ \
+ struct da_monitor *da_mon; \
+ \
+ if (!da_monitor_enabled_##name()) \
+ return 0; \
+ \
+ da_mon = da_get_monitor_##name(tsk); \
+ \
+ if (unlikely(!da_monitoring_##name(da_mon))) \
+ da_monitor_start_##name(da_mon); \
+ \
+ __da_handle_event_##name(da_mon, tsk, event); \
+ \
+ return 1; \
}
/*
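
[Editor's sketch] Stripped of the macro plumbing, the rewritten da_event handlers boil down to a lock-free compare-and-exchange loop: two racing events on the same monitor cannot silently overwrite each other's transition, and the loser retries from the state the winner installed. A sketch of that loop (MAX_DA_RETRY_RACING_EVENTS and the model_get_* helpers come from the generated model):

        curr = READ_ONCE(da_mon->curr_state);
        for (i = 0; i < MAX_DA_RETRY_RACING_EVENTS; i++) {
                next = model_get_next_state(curr, event);
                if (next == INVALID_STATE)
                        return false;                   /* illegal event: react + trace error */
                /* on failure, try_cmpxchg() reloads curr with the value it found */
                if (try_cmpxchg(&da_mon->curr_state, &curr, next))
                        return true;                    /* transition committed */
        }
        /* retries exhausted: warn and report failure so the monitor gets reset */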
diff --git a/include/rv/ltl_monitor.h b/include/rv/ltl_monitor.h
new file mode 100644
index 000000000000..67031a774e3d
--- /dev/null
+++ b/include/rv/ltl_monitor.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * This file must be combined with the $(MODEL_NAME).h file generated by
+ * tools/verification/rvgen.
+ */
+
+#include <linux/args.h>
+#include <linux/rv.h>
+#include <linux/stringify.h>
+#include <linux/seq_buf.h>
+#include <rv/instrumentation.h>
+#include <trace/events/task.h>
+#include <trace/events/sched.h>
+
+#ifndef MONITOR_NAME
+#error "Please include $(MODEL_NAME).h generated by rvgen"
+#endif
+
+#ifdef CONFIG_RV_REACTORS
+#define RV_MONITOR_NAME CONCATENATE(rv_, MONITOR_NAME)
+static struct rv_monitor RV_MONITOR_NAME;
+
+static void rv_cond_react(struct task_struct *task)
+{
+ if (!rv_reacting_on() || !RV_MONITOR_NAME.react)
+ return;
+ RV_MONITOR_NAME.react("rv: "__stringify(MONITOR_NAME)": %s[%d]: violation detected\n",
+ task->comm, task->pid);
+}
+#else
+static void rv_cond_react(struct task_struct *task)
+{
+}
+#endif
+
+static int ltl_monitor_slot = RV_PER_TASK_MONITOR_INIT;
+
+static void ltl_atoms_fetch(struct task_struct *task, struct ltl_monitor *mon);
+static void ltl_atoms_init(struct task_struct *task, struct ltl_monitor *mon, bool task_creation);
+
+static struct ltl_monitor *ltl_get_monitor(struct task_struct *task)
+{
+ return &task->rv[ltl_monitor_slot].ltl_mon;
+}
+
+static void ltl_task_init(struct task_struct *task, bool task_creation)
+{
+ struct ltl_monitor *mon = ltl_get_monitor(task);
+
+ memset(&mon->states, 0, sizeof(mon->states));
+
+ for (int i = 0; i < LTL_NUM_ATOM; ++i)
+ __set_bit(i, mon->unknown_atoms);
+
+ ltl_atoms_init(task, mon, task_creation);
+ ltl_atoms_fetch(task, mon);
+}
+
+static void handle_task_newtask(void *data, struct task_struct *task, unsigned long flags)
+{
+ ltl_task_init(task, true);
+}
+
+static int ltl_monitor_init(void)
+{
+ struct task_struct *g, *p;
+ int ret, cpu;
+
+ ret = rv_get_task_monitor_slot();
+ if (ret < 0)
+ return ret;
+
+ ltl_monitor_slot = ret;
+
+ rv_attach_trace_probe(name, task_newtask, handle_task_newtask);
+
+ read_lock(&tasklist_lock);
+
+ for_each_process_thread(g, p)
+ ltl_task_init(p, false);
+
+ for_each_present_cpu(cpu)
+ ltl_task_init(idle_task(cpu), false);
+
+ read_unlock(&tasklist_lock);
+
+ return 0;
+}
+
+static void ltl_monitor_destroy(void)
+{
+ rv_detach_trace_probe(name, task_newtask, handle_task_newtask);
+
+ rv_put_task_monitor_slot(ltl_monitor_slot);
+ ltl_monitor_slot = RV_PER_TASK_MONITOR_INIT;
+}
+
+static void ltl_illegal_state(struct task_struct *task, struct ltl_monitor *mon)
+{
+ CONCATENATE(trace_error_, MONITOR_NAME)(task);
+ rv_cond_react(task);
+}
+
+static void ltl_attempt_start(struct task_struct *task, struct ltl_monitor *mon)
+{
+ if (rv_ltl_all_atoms_known(mon))
+ ltl_start(task, mon);
+}
+
+static inline void ltl_atom_set(struct ltl_monitor *mon, enum ltl_atom atom, bool value)
+{
+ __clear_bit(atom, mon->unknown_atoms);
+ if (value)
+ __set_bit(atom, mon->atoms);
+ else
+ __clear_bit(atom, mon->atoms);
+}
+
+static void
+ltl_trace_event(struct task_struct *task, struct ltl_monitor *mon, unsigned long *next_state)
+{
+ const char *format_str = "%s";
+ DECLARE_SEQ_BUF(atoms, 64);
+ char states[32], next[32];
+ int i;
+
+ if (!CONCATENATE(CONCATENATE(trace_event_, MONITOR_NAME), _enabled)())
+ return;
+
+ snprintf(states, sizeof(states), "%*pbl", RV_MAX_BA_STATES, mon->states);
+ snprintf(next, sizeof(next), "%*pbl", RV_MAX_BA_STATES, next_state);
+
+ for (i = 0; i < LTL_NUM_ATOM; ++i) {
+ if (test_bit(i, mon->atoms)) {
+ seq_buf_printf(&atoms, format_str, ltl_atom_str(i));
+ format_str = ",%s";
+ }
+ }
+
+ CONCATENATE(trace_event_, MONITOR_NAME)(task, states, atoms.buffer, next);
+}
+
+static void ltl_validate(struct task_struct *task, struct ltl_monitor *mon)
+{
+ DECLARE_BITMAP(next_states, RV_MAX_BA_STATES) = {0};
+
+ if (!rv_ltl_valid_state(mon))
+ return;
+
+ for (unsigned int i = 0; i < RV_NUM_BA_STATES; ++i) {
+ if (test_bit(i, mon->states))
+ ltl_possible_next_states(mon, i, next_states);
+ }
+
+ ltl_trace_event(task, mon, next_states);
+
+ memcpy(mon->states, next_states, sizeof(next_states));
+
+ if (!rv_ltl_valid_state(mon))
+ ltl_illegal_state(task, mon);
+}
+
+static void ltl_atom_update(struct task_struct *task, enum ltl_atom atom, bool value)
+{
+ struct ltl_monitor *mon = ltl_get_monitor(task);
+
+ ltl_atom_set(mon, atom, value);
+ ltl_atoms_fetch(task, mon);
+
+ if (!rv_ltl_valid_state(mon)) {
+ ltl_attempt_start(task, mon);
+ return;
+ }
+
+ ltl_validate(task, mon);
+}
+
+static void __maybe_unused ltl_atom_pulse(struct task_struct *task, enum ltl_atom atom, bool value)
+{
+ struct ltl_monitor *mon = ltl_get_monitor(task);
+
+ ltl_atom_update(task, atom, value);
+
+ ltl_atom_set(mon, atom, !value);
+ ltl_validate(task, mon);
+}
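
[Editor's sketch] This header supplies the glue between tracepoint instrumentation and the per-task automaton state; the generated $(MODEL_NAME).h provides the atoms, ltl_possible_next_states() and ltl_start(). A hedged sketch of how a monitor might feed atoms in (the handlers and the LTL_EXAMPLE_ATOM name are illustrative; the real ones are emitted by rvgen):

static void handle_example_enter(void *data, struct task_struct *task)
{
        /* level-style atom: stays set until another handler clears it */
        ltl_atom_update(task, LTL_EXAMPLE_ATOM, true);
}

static void handle_example_event(void *data, struct task_struct *task)
{
        /* pulse-style atom: true for one evaluation, then flipped back */
        ltl_atom_pulse(task, LTL_EXAMPLE_ATOM, true);
}

Each update re-evaluates the automaton via ltl_validate(); if the set of possible states becomes empty, the monitor reports a violation through ltl_illegal_state().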
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 68dd49947d04..6d6500148c4b 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -184,6 +184,11 @@ struct scsi_device {
*/
unsigned force_runtime_start_on_system_start:1;
+ /*
+ * Set if the device is an ATA device.
+ */
+ unsigned is_ata:1;
+
unsigned removable:1;
unsigned changed:1; /* Data invalid due to media change */
unsigned busy:1; /* Used to prevent races */
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index d02b55261307..b908aacfef48 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -383,6 +383,8 @@ struct fc_rport { /* aka fc_starget_attrs */
struct work_struct stgt_delete_work;
struct work_struct rport_delete_work;
struct request_queue *rqst_q; /* bsg support */
+
+ struct workqueue_struct *devloss_work_q;
} __attribute__((aligned(sizeof(unsigned long))));
/* bit field values for struct fc_rport "flags" field: */
@@ -576,7 +578,6 @@ struct fc_host_attrs {
/* work queues for rport state manipulation */
struct workqueue_struct *work_q;
- struct workqueue_struct *devloss_work_q;
/* bsg support */
struct request_queue *rqst_q;
@@ -654,8 +655,6 @@ struct fc_host_attrs {
(((struct fc_host_attrs *)(x)->shost_data)->npiv_vports_inuse)
#define fc_host_work_q(x) \
(((struct fc_host_attrs *)(x)->shost_data)->work_q)
-#define fc_host_devloss_work_q(x) \
- (((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q)
#define fc_host_dev_loss_tmo(x) \
(((struct fc_host_attrs *)(x)->shost_data)->dev_loss_tmo)
#define fc_host_max_ct_payload(x) \
diff --git a/include/soc/spacemit/k1-syscon.h b/include/soc/spacemit/k1-syscon.h
new file mode 100644
index 000000000000..c59bd7a38e5b
--- /dev/null
+++ b/include/soc/spacemit/k1-syscon.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/* SpacemiT clock and reset driver definitions for the K1 SoC */
+
+#ifndef __SOC_K1_SYSCON_H__
+#define __SOC_K1_SYSCON_H__
+
+/* Auxiliary device used to represent a CCU reset controller */
+struct spacemit_ccu_adev {
+ struct auxiliary_device adev;
+ struct regmap *regmap;
+};
+
+static inline struct spacemit_ccu_adev *
+to_spacemit_ccu_adev(struct auxiliary_device *adev)
+{
+ return container_of(adev, struct spacemit_ccu_adev, adev);
+}
+
+/* APBS register offset */
+#define APBS_PLL1_SWCR1 0x100
+#define APBS_PLL1_SWCR2 0x104
+#define APBS_PLL1_SWCR3 0x108
+#define APBS_PLL2_SWCR1 0x118
+#define APBS_PLL2_SWCR2 0x11c
+#define APBS_PLL2_SWCR3 0x120
+#define APBS_PLL3_SWCR1 0x124
+#define APBS_PLL3_SWCR2 0x128
+#define APBS_PLL3_SWCR3 0x12c
+
+/* MPMU register offset */
+#define MPMU_POSR 0x0010
+#define POSR_PLL1_LOCK BIT(27)
+#define POSR_PLL2_LOCK BIT(28)
+#define POSR_PLL3_LOCK BIT(29)
+#define MPMU_SUCCR 0x0014
+#define MPMU_ISCCR 0x0044
+#define MPMU_WDTPCR 0x0200
+#define MPMU_RIPCCR 0x0210
+#define MPMU_ACGR 0x1024
+#define MPMU_APBCSCR 0x1050
+#define MPMU_SUCCR_1 0x10b0
+
+/* APBC register offset */
+#define APBC_UART1_CLK_RST 0x00
+#define APBC_UART2_CLK_RST 0x04
+#define APBC_GPIO_CLK_RST 0x08
+#define APBC_PWM0_CLK_RST 0x0c
+#define APBC_PWM1_CLK_RST 0x10
+#define APBC_PWM2_CLK_RST 0x14
+#define APBC_PWM3_CLK_RST 0x18
+#define APBC_TWSI8_CLK_RST 0x20
+#define APBC_UART3_CLK_RST 0x24
+#define APBC_RTC_CLK_RST 0x28
+#define APBC_TWSI0_CLK_RST 0x2c
+#define APBC_TWSI1_CLK_RST 0x30
+#define APBC_TIMERS1_CLK_RST 0x34
+#define APBC_TWSI2_CLK_RST 0x38
+#define APBC_AIB_CLK_RST 0x3c
+#define APBC_TWSI4_CLK_RST 0x40
+#define APBC_TIMERS2_CLK_RST 0x44
+#define APBC_ONEWIRE_CLK_RST 0x48
+#define APBC_TWSI5_CLK_RST 0x4c
+#define APBC_DRO_CLK_RST 0x58
+#define APBC_IR_CLK_RST 0x5c
+#define APBC_TWSI6_CLK_RST 0x60
+#define APBC_COUNTER_CLK_SEL 0x64
+#define APBC_TWSI7_CLK_RST 0x68
+#define APBC_TSEN_CLK_RST 0x6c
+#define APBC_UART4_CLK_RST 0x70
+#define APBC_UART5_CLK_RST 0x74
+#define APBC_UART6_CLK_RST 0x78
+#define APBC_SSP3_CLK_RST 0x7c
+#define APBC_SSPA0_CLK_RST 0x80
+#define APBC_SSPA1_CLK_RST 0x84
+#define APBC_IPC_AP2AUD_CLK_RST 0x90
+#define APBC_UART7_CLK_RST 0x94
+#define APBC_UART8_CLK_RST 0x98
+#define APBC_UART9_CLK_RST 0x9c
+#define APBC_CAN0_CLK_RST 0xa0
+#define APBC_PWM4_CLK_RST 0xa8
+#define APBC_PWM5_CLK_RST 0xac
+#define APBC_PWM6_CLK_RST 0xb0
+#define APBC_PWM7_CLK_RST 0xb4
+#define APBC_PWM8_CLK_RST 0xb8
+#define APBC_PWM9_CLK_RST 0xbc
+#define APBC_PWM10_CLK_RST 0xc0
+#define APBC_PWM11_CLK_RST 0xc4
+#define APBC_PWM12_CLK_RST 0xc8
+#define APBC_PWM13_CLK_RST 0xcc
+#define APBC_PWM14_CLK_RST 0xd0
+#define APBC_PWM15_CLK_RST 0xd4
+#define APBC_PWM16_CLK_RST 0xd8
+#define APBC_PWM17_CLK_RST 0xdc
+#define APBC_PWM18_CLK_RST 0xe0
+#define APBC_PWM19_CLK_RST 0xe4
+
+/* APMU register offset */
+#define APMU_JPG_CLK_RES_CTRL 0x020
+#define APMU_CSI_CCIC2_CLK_RES_CTRL 0x024
+#define APMU_ISP_CLK_RES_CTRL 0x038
+#define APMU_LCD_CLK_RES_CTRL1 0x044
+#define APMU_LCD_SPI_CLK_RES_CTRL 0x048
+#define APMU_LCD_CLK_RES_CTRL2 0x04c
+#define APMU_CCIC_CLK_RES_CTRL 0x050
+#define APMU_SDH0_CLK_RES_CTRL 0x054
+#define APMU_SDH1_CLK_RES_CTRL 0x058
+#define APMU_USB_CLK_RES_CTRL 0x05c
+#define APMU_QSPI_CLK_RES_CTRL 0x060
+#define APMU_DMA_CLK_RES_CTRL 0x064
+#define APMU_AES_CLK_RES_CTRL 0x068
+#define APMU_VPU_CLK_RES_CTRL 0x0a4
+#define APMU_GPU_CLK_RES_CTRL 0x0cc
+#define APMU_SDH2_CLK_RES_CTRL 0x0e0
+#define APMU_PMUA_MC_CTRL 0x0e8
+#define APMU_PMU_CC2_AP 0x100
+#define APMU_PMUA_EM_CLK_RES_CTRL 0x104
+#define APMU_AUDIO_CLK_RES_CTRL 0x14c
+#define APMU_HDMI_CLK_RES_CTRL 0x1b8
+#define APMU_CCI550_CLK_CTRL 0x300
+#define APMU_ACLK_CLK_CTRL 0x388
+#define APMU_CPU_C0_CLK_CTRL 0x38C
+#define APMU_CPU_C1_CLK_CTRL 0x390
+#define APMU_PCIE_CLK_RES_CTRL_0 0x3cc
+#define APMU_PCIE_CLK_RES_CTRL_1 0x3d4
+#define APMU_PCIE_CLK_RES_CTRL_2 0x3dc
+#define APMU_EMAC0_CLK_RES_CTRL 0x3e4
+#define APMU_EMAC1_CLK_RES_CTRL 0x3ec
+
+/* RCPU register offsets */
+#define RCPU_SSP0_CLK_RST 0x0028
+#define RCPU_I2C0_CLK_RST 0x0030
+#define RCPU_UART1_CLK_RST 0x003c
+#define RCPU_CAN_CLK_RST 0x0048
+#define RCPU_IR_CLK_RST 0x004c
+#define RCPU_UART0_CLK_RST 0x00d8
+#define AUDIO_HDMI_CLK_CTRL 0x2044
+
+/* RCPU2 register offsets */
+#define RCPU2_PWM0_CLK_RST 0x0000
+#define RCPU2_PWM1_CLK_RST 0x0004
+#define RCPU2_PWM2_CLK_RST 0x0008
+#define RCPU2_PWM3_CLK_RST 0x000c
+#define RCPU2_PWM4_CLK_RST 0x0010
+#define RCPU2_PWM5_CLK_RST 0x0014
+#define RCPU2_PWM6_CLK_RST 0x0018
+#define RCPU2_PWM7_CLK_RST 0x001c
+#define RCPU2_PWM8_CLK_RST 0x0020
+#define RCPU2_PWM9_CLK_RST 0x0024
+
+/* APBC2 register offsets */
+#define APBC2_UART1_CLK_RST 0x0000
+#define APBC2_SSP2_CLK_RST 0x0004
+#define APBC2_TWSI3_CLK_RST 0x0008
+#define APBC2_RTC_CLK_RST 0x000c
+#define APBC2_TIMERS0_CLK_RST 0x0010
+#define APBC2_KPC_CLK_RST 0x0014
+#define APBC2_GPIO_CLK_RST 0x001c
+
+#endif /* __SOC_K1_SYSCON_H__ */
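
[Editor's sketch] The reset side of the CCU is expected to attach as an auxiliary driver and reuse the clock driver's regmap through the spacemit_ccu_adev wrapper. A hedged sketch of the retrieval (the driver function, register choice and bit are placeholders; only to_spacemit_ccu_adev() and the offsets above come from this header):

static int example_reset_probe(struct auxiliary_device *adev,
                               const struct auxiliary_device_id *id)
{
        struct spacemit_ccu_adev *cdev = to_spacemit_ccu_adev(adev);
        struct regmap *regmap = cdev->regmap;

        /* placeholder: toggle an assumed reset bit in a *_CLK_RST register */
        return regmap_update_bits(regmap, APBC_UART1_CLK_RST, BIT(1), 0);
}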
diff --git a/include/trace/events/alarmtimer.h b/include/trace/events/alarmtimer.h
index 13483c7ca70b..8e9c76a7f21b 100644
--- a/include/trace/events/alarmtimer.h
+++ b/include/trace/events/alarmtimer.h
@@ -20,6 +20,7 @@ TRACE_DEFINE_ENUM(ALARM_BOOTTIME_FREEZER);
{ 1 << ALARM_REALTIME_FREEZER, "REALTIME Freezer" }, \
{ 1 << ALARM_BOOTTIME_FREEZER, "BOOTTIME Freezer" })
+#ifdef CONFIG_RTC_CLASS
TRACE_EVENT(alarmtimer_suspend,
TP_PROTO(ktime_t expires, int flag),
@@ -41,6 +42,7 @@ TRACE_EVENT(alarmtimer_suspend,
__entry->expires
)
);
+#endif /* CONFIG_RTC_CLASS */
DECLARE_EVENT_CLASS(alarm_class,
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index d54fe354b390..7e418f065b94 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -687,7 +687,6 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
__field( loff_t, range_start )
__field( loff_t, range_end )
__field( char, for_kupdate )
- __field( char, for_reclaim )
__field( char, range_cyclic )
__field( unsigned long, writeback_index )
__field( u64, root_objectid )
@@ -701,7 +700,6 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
__entry->range_start = wbc->range_start;
__entry->range_end = wbc->range_end;
__entry->for_kupdate = wbc->for_kupdate;
- __entry->for_reclaim = wbc->for_reclaim;
__entry->range_cyclic = wbc->range_cyclic;
__entry->writeback_index = inode->i_mapping->writeback_index;
__entry->root_objectid = btrfs_root_id(BTRFS_I(inode)->root);
@@ -710,13 +708,12 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
TP_printk_btrfs("root=%llu(%s) ino=%llu page_index=%lu "
"nr_to_write=%ld pages_skipped=%ld range_start=%llu "
"range_end=%llu for_kupdate=%d "
- "for_reclaim=%d range_cyclic=%d writeback_index=%lu",
+ "range_cyclic=%d writeback_index=%lu",
show_root_type(__entry->root_objectid),
__entry->ino, __entry->index,
__entry->nr_to_write, __entry->pages_skipped,
__entry->range_start, __entry->range_end,
- __entry->for_kupdate,
- __entry->for_reclaim, __entry->range_cyclic,
+ __entry->for_kupdate, __entry->range_cyclic,
__entry->writeback_index)
);
diff --git a/include/trace/events/cgroup.h b/include/trace/events/cgroup.h
index 7d332387be6c..ba9229af9a34 100644
--- a/include/trace/events/cgroup.h
+++ b/include/trace/events/cgroup.h
@@ -257,53 +257,6 @@ DEFINE_EVENT(cgroup_rstat, cgroup_rstat_unlock,
TP_ARGS(cgrp, cpu, contended)
);
-/*
- * Related to per CPU locks:
- * global rstat_base_cpu_lock for base stats
- * cgroup_subsys::rstat_ss_cpu_lock for subsystem stats
- */
-DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_lock_contended,
-
- TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),
-
- TP_ARGS(cgrp, cpu, contended)
-);
-
-DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_lock_contended_fastpath,
-
- TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),
-
- TP_ARGS(cgrp, cpu, contended)
-);
-
-DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_locked,
-
- TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),
-
- TP_ARGS(cgrp, cpu, contended)
-);
-
-DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_locked_fastpath,
-
- TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),
-
- TP_ARGS(cgrp, cpu, contended)
-);
-
-DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_unlock,
-
- TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),
-
- TP_ARGS(cgrp, cpu, contended)
-);
-
-DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_unlock_fastpath,
-
- TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),
-
- TP_ARGS(cgrp, cpu, contended)
-);
-
#endif /* _TRACE_CGROUP_H */
/* This part must be outside protection */
diff --git a/include/trace/events/damon.h b/include/trace/events/damon.h
index da4bd9fd1162..852d725afea2 100644
--- a/include/trace/events/damon.h
+++ b/include/trace/events/damon.h
@@ -9,6 +9,30 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
+TRACE_EVENT(damos_esz,
+
+ TP_PROTO(unsigned int context_idx, unsigned int scheme_idx,
+ unsigned long esz),
+
+ TP_ARGS(context_idx, scheme_idx, esz),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, context_idx)
+ __field(unsigned int, scheme_idx)
+ __field(unsigned long, esz)
+ ),
+
+ TP_fast_assign(
+ __entry->context_idx = context_idx;
+ __entry->scheme_idx = scheme_idx;
+ __entry->esz = esz;
+ ),
+
+ TP_printk("ctx_idx=%u scheme_idx=%u esz=%lu",
+ __entry->context_idx, __entry->scheme_idx,
+ __entry->esz)
+);
+
TRACE_EVENT_CONDITION(damos_before_apply,
TP_PROTO(unsigned int context_idx, unsigned int scheme_idx,
@@ -48,6 +72,23 @@ TRACE_EVENT_CONDITION(damos_before_apply,
__entry->nr_accesses, __entry->age)
);
+TRACE_EVENT(damon_monitor_intervals_tune,
+
+ TP_PROTO(unsigned long sample_us),
+
+ TP_ARGS(sample_us),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, sample_us)
+ ),
+
+ TP_fast_assign(
+ __entry->sample_us = sample_us;
+ ),
+
+ TP_printk("sample_us=%lu", __entry->sample_us)
+);
+
TRACE_EVENT(damon_aggregated,
TP_PROTO(unsigned int target_id, struct damon_region *r,
diff --git a/include/trace/events/dma_fence.h b/include/trace/events/dma_fence.h
index a4de3df8500b..4814a65b68dc 100644
--- a/include/trace/events/dma_fence.h
+++ b/include/trace/events/dma_fence.h
@@ -16,6 +16,36 @@ DECLARE_EVENT_CLASS(dma_fence,
TP_ARGS(fence),
TP_STRUCT__entry(
+ __string(driver, dma_fence_driver_name(fence))
+ __string(timeline, dma_fence_timeline_name(fence))
+ __field(unsigned int, context)
+ __field(unsigned int, seqno)
+ ),
+
+ TP_fast_assign(
+ __assign_str(driver);
+ __assign_str(timeline);
+ __entry->context = fence->context;
+ __entry->seqno = fence->seqno;
+ ),
+
+ TP_printk("driver=%s timeline=%s context=%u seqno=%u",
+ __get_str(driver), __get_str(timeline), __entry->context,
+ __entry->seqno)
+);
+
+/*
+ * Safe only for call sites which are guaranteed to not race with fence
+ * signaling, holding the fence->lock and having checked for not signaled, or the
+ * signaling path itself.
+ */
+DECLARE_EVENT_CLASS(dma_fence_unsignaled,
+
+ TP_PROTO(struct dma_fence *fence),
+
+ TP_ARGS(fence),
+
+ TP_STRUCT__entry(
__string(driver, fence->ops->get_driver_name(fence))
__string(timeline, fence->ops->get_timeline_name(fence))
__field(unsigned int, context)
@@ -34,14 +64,14 @@ DECLARE_EVENT_CLASS(dma_fence,
__entry->seqno)
);
-DEFINE_EVENT(dma_fence, dma_fence_emit,
+DEFINE_EVENT(dma_fence_unsignaled, dma_fence_emit,
TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-DEFINE_EVENT(dma_fence, dma_fence_init,
+DEFINE_EVENT(dma_fence_unsignaled, dma_fence_init,
TP_PROTO(struct dma_fence *fence),
@@ -55,14 +85,14 @@ DEFINE_EVENT(dma_fence, dma_fence_destroy,
TP_ARGS(fence)
);
-DEFINE_EVENT(dma_fence, dma_fence_enable_signal,
+DEFINE_EVENT(dma_fence_unsignaled, dma_fence_enable_signal,
TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-DEFINE_EVENT(dma_fence, dma_fence_signaled,
+DEFINE_EVENT(dma_fence_unsignaled, dma_fence_signaled,
TP_PROTO(struct dma_fence *fence),
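
[Editor's sketch] The dma_fence_unsignaled class still dereferences fence->ops for the driver and timeline names, which is only safe while the fence cannot be signalled or released underneath us. A hedged sketch of a call site that satisfies the comment above (illustrative; the real emitters are in drivers/dma-buf and the drivers):

static void example_enable_signaling_traced(struct dma_fence *fence)
{
        unsigned long flags;

        spin_lock_irqsave(fence->lock, flags);
        if (!dma_fence_is_signaled_locked(fence))
                /* lock held and fence known unsignaled: ops-> accessors are safe */
                trace_dma_fence_enable_signal(fence);
        spin_unlock_irqrestore(fence->lock, flags);
}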
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 6f9cf2811733..a374e7ea7e57 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -23,10 +23,7 @@ struct partial_cluster;
#define show_mballoc_flags(flags) __print_flags(flags, "|", \
{ EXT4_MB_HINT_MERGE, "HINT_MERGE" }, \
- { EXT4_MB_HINT_RESERVED, "HINT_RESV" }, \
- { EXT4_MB_HINT_METADATA, "HINT_MDATA" }, \
{ EXT4_MB_HINT_FIRST, "HINT_FIRST" }, \
- { EXT4_MB_HINT_BEST, "HINT_BEST" }, \
{ EXT4_MB_HINT_DATA, "HINT_DATA" }, \
{ EXT4_MB_HINT_NOPREALLOC, "HINT_NOPREALLOC" }, \
{ EXT4_MB_HINT_GROUP_ALLOC, "HINT_GRP_ALLOC" }, \
@@ -483,16 +480,17 @@ TRACE_EVENT(ext4_writepages,
(unsigned long) __entry->writeback_index)
);
-TRACE_EVENT(ext4_da_write_pages,
- TP_PROTO(struct inode *inode, pgoff_t first_page,
+TRACE_EVENT(ext4_da_write_folios_start,
+ TP_PROTO(struct inode *inode, loff_t start_pos, loff_t next_pos,
struct writeback_control *wbc),
- TP_ARGS(inode, first_page, wbc),
+ TP_ARGS(inode, start_pos, next_pos, wbc),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
- __field( pgoff_t, first_page )
+ __field( loff_t, start_pos )
+ __field( loff_t, next_pos )
__field( long, nr_to_write )
__field( int, sync_mode )
),
@@ -500,18 +498,48 @@ TRACE_EVENT(ext4_da_write_pages,
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->first_page = first_page;
+ __entry->start_pos = start_pos;
+ __entry->next_pos = next_pos;
__entry->nr_to_write = wbc->nr_to_write;
__entry->sync_mode = wbc->sync_mode;
),
- TP_printk("dev %d,%d ino %lu first_page %lu nr_to_write %ld "
- "sync_mode %d",
+ TP_printk("dev %d,%d ino %lu start_pos 0x%llx next_pos 0x%llx nr_to_write %ld sync_mode %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->first_page,
+ (unsigned long) __entry->ino, __entry->start_pos, __entry->next_pos,
__entry->nr_to_write, __entry->sync_mode)
);
+TRACE_EVENT(ext4_da_write_folios_end,
+ TP_PROTO(struct inode *inode, loff_t start_pos, loff_t next_pos,
+ struct writeback_control *wbc, int ret),
+
+ TP_ARGS(inode, start_pos, next_pos, wbc, ret),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( loff_t, start_pos )
+ __field( loff_t, next_pos )
+ __field( long, nr_to_write )
+ __field( int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->start_pos = start_pos;
+ __entry->next_pos = next_pos;
+ __entry->nr_to_write = wbc->nr_to_write;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev %d,%d ino %lu start_pos 0x%llx next_pos 0x%llx nr_to_write %ld ret %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino, __entry->start_pos, __entry->next_pos,
+ __entry->nr_to_write, __entry->ret)
+);
+
TRACE_EVENT(ext4_da_write_pages_extent,
TP_PROTO(struct inode *inode, struct ext4_map_blocks *map),
diff --git a/include/trace/events/fs_dax.h b/include/trace/events/fs_dax.h
index 76b56f78abb0..50ebc1290ab0 100644
--- a/include/trace/events/fs_dax.h
+++ b/include/trace/events/fs_dax.h
@@ -15,7 +15,7 @@ DECLARE_EVENT_CLASS(dax_pmd_fault_class,
__field(unsigned long, ino)
__field(unsigned long, vm_start)
__field(unsigned long, vm_end)
- __field(unsigned long, vm_flags)
+ __field(vm_flags_t, vm_flags)
__field(unsigned long, address)
__field(pgoff_t, pgoff)
__field(pgoff_t, max_pgoff)
@@ -67,7 +67,7 @@ DECLARE_EVENT_CLASS(dax_pmd_load_hole_class,
TP_ARGS(inode, vmf, zero_folio, radix_entry),
TP_STRUCT__entry(
__field(unsigned long, ino)
- __field(unsigned long, vm_flags)
+ __field(vm_flags_t, vm_flags)
__field(unsigned long, address)
__field(struct folio *, zero_folio)
__field(void *, radix_entry)
@@ -107,7 +107,7 @@ DECLARE_EVENT_CLASS(dax_pte_fault_class,
TP_ARGS(inode, vmf, result),
TP_STRUCT__entry(
__field(unsigned long, ino)
- __field(unsigned long, vm_flags)
+ __field(vm_flags_t, vm_flags)
__field(unsigned long, address)
__field(pgoff_t, pgoff)
__field(dev_t, dev)
diff --git a/include/trace/events/ipi.h b/include/trace/events/ipi.h
index 3de9bfc982ce..9912f0ded81d 100644
--- a/include/trace/events/ipi.h
+++ b/include/trace/events/ipi.h
@@ -7,34 +7,6 @@
#include <linux/tracepoint.h>
-/**
- * ipi_raise - called when a smp cross call is made
- *
- * @mask: mask of recipient CPUs for the IPI
- * @reason: string identifying the IPI purpose
- *
- * It is necessary for @reason to be a static string declared with
- * __tracepoint_string.
- */
-TRACE_EVENT(ipi_raise,
-
- TP_PROTO(const struct cpumask *mask, const char *reason),
-
- TP_ARGS(mask, reason),
-
- TP_STRUCT__entry(
- __bitmask(target_cpus, nr_cpumask_bits)
- __field(const char *, reason)
- ),
-
- TP_fast_assign(
- __assign_bitmask(target_cpus, cpumask_bits(mask), nr_cpumask_bits);
- __entry->reason = reason;
- ),
-
- TP_printk("target_mask=%s (%s)", __get_bitmask(target_cpus), __entry->reason)
-);
-
TRACE_EVENT(ipi_send_cpu,
TP_PROTO(const unsigned int cpu, unsigned long callsite, void *callback),
@@ -79,6 +51,35 @@ TRACE_EVENT(ipi_send_cpumask,
__get_cpumask(cpumask), __entry->callsite, __entry->callback)
);
+#ifdef CONFIG_HAVE_EXTRA_IPI_TRACEPOINTS
+/**
+ * ipi_raise - called when a smp cross call is made
+ *
+ * @mask: mask of recipient CPUs for the IPI
+ * @reason: string identifying the IPI purpose
+ *
+ * It is necessary for @reason to be a static string declared with
+ * __tracepoint_string.
+ */
+TRACE_EVENT(ipi_raise,
+
+ TP_PROTO(const struct cpumask *mask, const char *reason),
+
+ TP_ARGS(mask, reason),
+
+ TP_STRUCT__entry(
+ __bitmask(target_cpus, nr_cpumask_bits)
+ __field(const char *, reason)
+ ),
+
+ TP_fast_assign(
+ __assign_bitmask(target_cpus, cpumask_bits(mask), nr_cpumask_bits);
+ __entry->reason = reason;
+ ),
+
+ TP_printk("target_mask=%s (%s)", __get_bitmask(target_cpus), __entry->reason)
+);
+
DECLARE_EVENT_CLASS(ipi_handler,
TP_PROTO(const char *reason),
@@ -127,6 +128,7 @@ DEFINE_EVENT(ipi_handler, ipi_exit,
TP_ARGS(reason)
);
+#endif /* CONFIG_HAVE_EXTRA_IPI_TRACEPOINTS */
#endif /* _TRACE_IPI_H */
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index f74925a6cf69..474358773abe 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -304,44 +304,6 @@ TRACE_EVENT(mm_page_alloc_extfrag,
__entry->change_ownership)
);
-TRACE_EVENT(mm_alloc_contig_migrate_range_info,
-
- TP_PROTO(unsigned long start,
- unsigned long end,
- unsigned long nr_migrated,
- unsigned long nr_reclaimed,
- unsigned long nr_mapped,
- int migratetype),
-
- TP_ARGS(start, end, nr_migrated, nr_reclaimed, nr_mapped, migratetype),
-
- TP_STRUCT__entry(
- __field(unsigned long, start)
- __field(unsigned long, end)
- __field(unsigned long, nr_migrated)
- __field(unsigned long, nr_reclaimed)
- __field(unsigned long, nr_mapped)
- __field(int, migratetype)
- ),
-
- TP_fast_assign(
- __entry->start = start;
- __entry->end = end;
- __entry->nr_migrated = nr_migrated;
- __entry->nr_reclaimed = nr_reclaimed;
- __entry->nr_mapped = nr_mapped;
- __entry->migratetype = migratetype;
- ),
-
- TP_printk("start=0x%lx end=0x%lx migratetype=%d nr_migrated=%lu nr_reclaimed=%lu nr_mapped=%lu",
- __entry->start,
- __entry->end,
- __entry->migratetype,
- __entry->nr_migrated,
- __entry->nr_reclaimed,
- __entry->nr_mapped)
-);
-
TRACE_EVENT(mm_setup_per_zone_wmarks,
TP_PROTO(struct zone *zone),
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index fc7d0f8ff078..8b7252b8d751 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -82,95 +82,15 @@ TRACE_EVENT(kvm_set_irq,
TP_printk("gsi %u level %d source %d",
__entry->gsi, __entry->level, __entry->irq_source_id)
);
-#endif /* defined(CONFIG_HAVE_KVM_IRQCHIP) */
-
-#if defined(__KVM_HAVE_IOAPIC)
-#define kvm_deliver_mode \
- {0x0, "Fixed"}, \
- {0x1, "LowPrio"}, \
- {0x2, "SMI"}, \
- {0x3, "Res3"}, \
- {0x4, "NMI"}, \
- {0x5, "INIT"}, \
- {0x6, "SIPI"}, \
- {0x7, "ExtINT"}
-
-TRACE_EVENT(kvm_ioapic_set_irq,
- TP_PROTO(__u64 e, int pin, bool coalesced),
- TP_ARGS(e, pin, coalesced),
-
- TP_STRUCT__entry(
- __field( __u64, e )
- __field( int, pin )
- __field( bool, coalesced )
- ),
-
- TP_fast_assign(
- __entry->e = e;
- __entry->pin = pin;
- __entry->coalesced = coalesced;
- ),
-
- TP_printk("pin %u dst %x vec %u (%s|%s|%s%s)%s",
- __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
- __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
- (__entry->e & (1<<11)) ? "logical" : "physical",
- (__entry->e & (1<<15)) ? "level" : "edge",
- (__entry->e & (1<<16)) ? "|masked" : "",
- __entry->coalesced ? " (coalesced)" : "")
-);
-TRACE_EVENT(kvm_ioapic_delayed_eoi_inj,
- TP_PROTO(__u64 e),
- TP_ARGS(e),
-
- TP_STRUCT__entry(
- __field( __u64, e )
- ),
-
- TP_fast_assign(
- __entry->e = e;
- ),
-
- TP_printk("dst %x vec %u (%s|%s|%s%s)",
- (u8)(__entry->e >> 56), (u8)__entry->e,
- __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
- (__entry->e & (1<<11)) ? "logical" : "physical",
- (__entry->e & (1<<15)) ? "level" : "edge",
- (__entry->e & (1<<16)) ? "|masked" : "")
-);
-
-TRACE_EVENT(kvm_msi_set_irq,
- TP_PROTO(__u64 address, __u64 data),
- TP_ARGS(address, data),
-
- TP_STRUCT__entry(
- __field( __u64, address )
- __field( __u64, data )
- ),
-
- TP_fast_assign(
- __entry->address = address;
- __entry->data = data;
- ),
-
- TP_printk("dst %llx vec %u (%s|%s|%s%s)",
- (u8)(__entry->address >> 12) | ((__entry->address >> 32) & 0xffffff00),
- (u8)__entry->data,
- __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
- (__entry->address & (1<<2)) ? "logical" : "physical",
- (__entry->data & (1<<15)) ? "level" : "edge",
- (__entry->address & (1<<3)) ? "|rh" : "")
-);
+#ifdef CONFIG_KVM_IOAPIC
#define kvm_irqchips \
{KVM_IRQCHIP_PIC_MASTER, "PIC master"}, \
{KVM_IRQCHIP_PIC_SLAVE, "PIC slave"}, \
{KVM_IRQCHIP_IOAPIC, "IOAPIC"}
-#endif /* defined(__KVM_HAVE_IOAPIC) */
-
-#if defined(CONFIG_HAVE_KVM_IRQCHIP)
+#endif /* CONFIG_KVM_IOAPIC */
#ifdef kvm_irqchips
#define kvm_ack_irq_string "irqchip %s pin %u"
@@ -473,6 +393,33 @@ TRACE_EVENT(kvm_dirty_ring_exit,
TP_printk("vcpu %d", __entry->vcpu_id)
);
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+/*
+ * @start: Starting address of guest memory range
+ * @end: End address of guest memory range
+ * @attr: The value of the attribute being set.
+ */
+TRACE_EVENT(kvm_vm_set_mem_attributes,
+ TP_PROTO(gfn_t start, gfn_t end, unsigned long attr),
+ TP_ARGS(start, end, attr),
+
+ TP_STRUCT__entry(
+ __field(gfn_t, start)
+ __field(gfn_t, end)
+ __field(unsigned long, attr)
+ ),
+
+ TP_fast_assign(
+ __entry->start = start;
+ __entry->end = end;
+ __entry->attr = attr;
+ ),
+
+ TP_printk("%#016llx -- %#016llx [0x%lx]",
+ __entry->start, __entry->end, __entry->attr)
+);
+#endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
+
TRACE_EVENT(kvm_unmap_hva_range,
TP_PROTO(unsigned long start, unsigned long end),
TP_ARGS(start, end),
diff --git a/include/trace/events/mmap.h b/include/trace/events/mmap.h
index f8d61485de16..ee2843a5daef 100644
--- a/include/trace/events/mmap.h
+++ b/include/trace/events/mmap.h
@@ -43,58 +43,6 @@ TRACE_EVENT(vm_unmapped_area,
__entry->align_offset)
);
-TRACE_EVENT(vma_mas_szero,
- TP_PROTO(struct maple_tree *mt, unsigned long start,
- unsigned long end),
-
- TP_ARGS(mt, start, end),
-
- TP_STRUCT__entry(
- __field(struct maple_tree *, mt)
- __field(unsigned long, start)
- __field(unsigned long, end)
- ),
-
- TP_fast_assign(
- __entry->mt = mt;
- __entry->start = start;
- __entry->end = end;
- ),
-
- TP_printk("mt_mod %p, (NULL), SNULL, %lu, %lu,",
- __entry->mt,
- (unsigned long) __entry->start,
- (unsigned long) __entry->end
- )
-);
-
-TRACE_EVENT(vma_store,
- TP_PROTO(struct maple_tree *mt, struct vm_area_struct *vma),
-
- TP_ARGS(mt, vma),
-
- TP_STRUCT__entry(
- __field(struct maple_tree *, mt)
- __field(struct vm_area_struct *, vma)
- __field(unsigned long, vm_start)
- __field(unsigned long, vm_end)
- ),
-
- TP_fast_assign(
- __entry->mt = mt;
- __entry->vma = vma;
- __entry->vm_start = vma->vm_start;
- __entry->vm_end = vma->vm_end - 1;
- ),
-
- TP_printk("mt_mod %p, (%p), STORE, %lu, %lu,",
- __entry->mt, __entry->vma,
- (unsigned long) __entry->vm_start,
- (unsigned long) __entry->vm_end
- )
-);
-
-
TRACE_EVENT(exit_mmap,
TP_PROTO(struct mm_struct *mm),
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 6c631eec23e3..82904291c2b8 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -62,6 +62,7 @@ TRACE_EVENT(cpu_idle_miss,
(unsigned long)__entry->state, (__entry->below)?"below":"above")
);
+#ifdef CONFIG_ARM_PSCI_CPUIDLE
DECLARE_EVENT_CLASS(psci_domain_idle,
TP_PROTO(unsigned int cpu_id, unsigned int state, bool s2idle),
@@ -98,28 +99,7 @@ DEFINE_EVENT(psci_domain_idle, psci_domain_idle_exit,
TP_ARGS(cpu_id, state, s2idle)
);
-
-TRACE_EVENT(powernv_throttle,
-
- TP_PROTO(int chip_id, const char *reason, int pmax),
-
- TP_ARGS(chip_id, reason, pmax),
-
- TP_STRUCT__entry(
- __field(int, chip_id)
- __string(reason, reason)
- __field(int, pmax)
- ),
-
- TP_fast_assign(
- __entry->chip_id = chip_id;
- __assign_str(reason);
- __entry->pmax = pmax;
- ),
-
- TP_printk("Chip %d Pmax %d %s", __entry->chip_id,
- __entry->pmax, __get_str(reason))
-);
+#endif
TRACE_EVENT(pstate_sample,
@@ -232,6 +212,7 @@ TRACE_EVENT(cpu_frequency_limits,
(unsigned long)__entry->cpu_id)
);
+#ifdef CONFIG_PM_SLEEP
TRACE_EVENT(device_pm_callback_start,
TP_PROTO(struct device *dev, const char *pm_ops, int event),
@@ -280,6 +261,7 @@ TRACE_EVENT(device_pm_callback_end,
TP_printk("%s %s, err=%d",
__get_str(driver), __get_str(device), __entry->error)
);
+#endif
TRACE_EVENT(suspend_resume,
@@ -337,6 +319,7 @@ DEFINE_EVENT(wakeup_source, wakeup_source_deactivate,
TP_ARGS(name, state)
);
+#ifdef CONFIG_ARCH_OMAP2PLUS
/*
* The power domain events are used for power domains transitions
*/
@@ -368,6 +351,7 @@ DEFINE_EVENT(power_domain, power_domain_target,
TP_ARGS(name, state, cpu_id)
);
+#endif
/*
* CPU latency QoS events used for global CPU latency QoS list updates
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 4e6b2910cec3..9f16ee2603ba 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -628,6 +628,7 @@ TRACE_EVENT(sched_process_hang,
);
#endif /* CONFIG_DETECT_HUNG_TASK */
+#ifdef CONFIG_NUMA_BALANCING
/*
* Tracks migration of tasks from one runqueue to another. Can be used to
* detect if automatic NUMA balancing is bouncing between nodes.
@@ -720,7 +721,6 @@ DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa,
TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);
-#ifdef CONFIG_NUMA_BALANCING
#define NUMAB_SKIP_REASON \
EM( NUMAB_SKIP_UNSUITABLE, "unsuitable" ) \
EM( NUMAB_SKIP_SHARED_RO, "shared_ro" ) \
@@ -882,18 +882,22 @@ DECLARE_TRACE(sched_compute_energy,
TP_ARGS(p, dst_cpu, energy, max_util, busy_time));
DECLARE_TRACE(sched_entry,
- TP_PROTO(bool preempt, unsigned long ip),
- TP_ARGS(preempt, ip));
+ TP_PROTO(bool preempt),
+ TP_ARGS(preempt));
DECLARE_TRACE(sched_exit,
- TP_PROTO(bool is_switch, unsigned long ip),
- TP_ARGS(is_switch, ip));
+ TP_PROTO(bool is_switch),
+ TP_ARGS(is_switch));
DECLARE_TRACE_CONDITION(sched_set_state,
TP_PROTO(struct task_struct *tsk, int state),
TP_ARGS(tsk, state),
TP_CONDITION(!!(tsk->__state) != !!state));
+DECLARE_TRACE(sched_set_need_resched,
+ TP_PROTO(struct task_struct *tsk, int cpu, int tif),
+ TP_ARGS(tsk, cpu, tif));
+
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h
index bf6cc98d9122..c36c72ab7f2b 100644
--- a/include/trace/events/scsi.h
+++ b/include/trace/events/scsi.h
@@ -200,6 +200,14 @@ TRACE_EVENT(scsi_dispatch_cmd_start,
__print_hex(__get_dynamic_array(cmnd), __entry->cmd_len))
);
+#define scsi_rtn_name(result) { result, #result }
+#define show_rtn_name(val) \
+ __print_symbolic(val, \
+ scsi_rtn_name(SCSI_MLQUEUE_HOST_BUSY), \
+ scsi_rtn_name(SCSI_MLQUEUE_DEVICE_BUSY), \
+ scsi_rtn_name(SCSI_MLQUEUE_EH_RETRY), \
+ scsi_rtn_name(SCSI_MLQUEUE_TARGET_BUSY))
+
TRACE_EVENT(scsi_dispatch_cmd_error,
TP_PROTO(struct scsi_cmnd *cmd, int rtn),
@@ -240,14 +248,15 @@ TRACE_EVENT(scsi_dispatch_cmd_error,
TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \
" prot_op=%s driver_tag=%d scheduler_tag=%d cmnd=(%s %s raw=%s)" \
- " rtn=%d",
+ " rtn=%s",
__entry->host_no, __entry->channel, __entry->id,
__entry->lun, __entry->data_sglen, __entry->prot_sglen,
show_prot_op_name(__entry->prot_op), __entry->driver_tag,
__entry->scheduler_tag, show_opcode_name(__entry->opcode),
__parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
__print_hex(__get_dynamic_array(cmnd), __entry->cmd_len),
- __entry->rtn)
+ show_rtn_name(__entry->rtn)
+ )
);
DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template,
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index 95f59c1a6f57..9d2c36c6a0ed 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -13,17 +13,11 @@
#include <linux/sock_diag.h>
#include <net/rstreason.h>
-/*
- * tcp event with arguments sk and skb
- *
- * Note: this class requires a valid sk pointer; while skb pointer could
- * be NULL.
- */
-DECLARE_EVENT_CLASS(tcp_event_sk_skb,
+TRACE_EVENT(tcp_retransmit_skb,
- TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb, int err),
- TP_ARGS(sk, skb),
+ TP_ARGS(sk, skb, err),
TP_STRUCT__entry(
__field(const void *, skbaddr)
@@ -36,6 +30,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
__array(__u8, daddr, 4)
__array(__u8, saddr_v6, 16)
__array(__u8, daddr_v6, 16)
+ __field(int, err)
),
TP_fast_assign(
@@ -58,21 +53,17 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
+
+ __entry->err = err;
),
- TP_printk("skbaddr=%p skaddr=%p family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s",
+ TP_printk("skbaddr=%p skaddr=%p family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s err=%d",
__entry->skbaddr, __entry->skaddr,
show_family_name(__entry->family),
__entry->sport, __entry->dport, __entry->saddr, __entry->daddr,
__entry->saddr_v6, __entry->daddr_v6,
- show_tcp_state_name(__entry->state))
-);
-
-DEFINE_EVENT(tcp_event_sk_skb, tcp_retransmit_skb,
-
- TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
-
- TP_ARGS(sk, skb)
+ show_tcp_state_name(__entry->state),
+ __entry->err)
);
#undef FN
@@ -692,6 +683,7 @@ DEFINE_EVENT(tcp_ao_event, tcp_ao_handshake_failure,
TP_ARGS(sk, skb, keyid, rnext, maclen)
);
+#ifdef CONFIG_TCP_AO
DEFINE_EVENT(tcp_ao_event, tcp_ao_wrong_maclen,
TP_PROTO(const struct sock *sk, const struct sk_buff *skb,
const __u8 keyid, const __u8 rnext, const __u8 maclen),
@@ -830,6 +822,7 @@ DEFINE_EVENT(tcp_ao_event_sne, tcp_ao_rcv_sne_update,
TP_PROTO(const struct sock *sk, __u32 new_sne),
TP_ARGS(sk, new_sne)
);
+#endif /* CONFIG_TCP_AO */
#endif /* _TRACE_TCP_H */
diff --git a/include/trace/events/thp.h b/include/trace/events/thp.h
index f50048af5fcc..c8fe879d5828 100644
--- a/include/trace/events/thp.h
+++ b/include/trace/events/thp.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
+#ifdef CONFIG_PPC_BOOK3S_64
DECLARE_EVENT_CLASS(hugepage_set,
TP_PROTO(unsigned long addr, unsigned long pte),
@@ -66,6 +67,7 @@ DEFINE_EVENT(hugepage_update, hugepage_update_pud,
TP_PROTO(unsigned long addr, unsigned long pud, unsigned long clr, unsigned long set),
TP_ARGS(addr, pud, clr, set)
);
+#endif /* CONFIG_PPC_BOOK3S_64 */
DECLARE_EVENT_CLASS(migration_pmd,
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 0ff388131fc9..1e23919c0da9 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -459,7 +459,6 @@ DECLARE_EVENT_CLASS(wbc_class,
__field(int, sync_mode)
__field(int, for_kupdate)
__field(int, for_background)
- __field(int, for_reclaim)
__field(int, range_cyclic)
__field(long, range_start)
__field(long, range_end)
@@ -473,23 +472,20 @@ DECLARE_EVENT_CLASS(wbc_class,
__entry->sync_mode = wbc->sync_mode;
__entry->for_kupdate = wbc->for_kupdate;
__entry->for_background = wbc->for_background;
- __entry->for_reclaim = wbc->for_reclaim;
__entry->range_cyclic = wbc->range_cyclic;
__entry->range_start = (long)wbc->range_start;
__entry->range_end = (long)wbc->range_end;
__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
),
- TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
- "bgrd=%d reclm=%d cyclic=%d "
- "start=0x%lx end=0x%lx cgroup_ino=%lu",
+ TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d bgrd=%d "
+ "cyclic=%d start=0x%lx end=0x%lx cgroup_ino=%lu",
__entry->name,
__entry->nr_to_write,
__entry->pages_skipped,
__entry->sync_mode,
__entry->for_kupdate,
__entry->for_background,
- __entry->for_reclaim,
__entry->range_cyclic,
__entry->range_start,
__entry->range_end,
diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
index d3ef86c97ae3..18c0ac514fcb 100644
--- a/include/trace/events/xdp.h
+++ b/include/trace/events/xdp.h
@@ -168,25 +168,7 @@ DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err,
#define _trace_xdp_redirect_map_err(dev, xdp, to, map_type, map_id, index, err) \
trace_xdp_redirect_err(dev, xdp, to, err, map_type, map_id, index)
-/* not used anymore, but kept around so as not to break old programs */
-DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map,
- TP_PROTO(const struct net_device *dev,
- const struct bpf_prog *xdp,
- const void *tgt, int err,
- enum bpf_map_type map_type,
- u32 map_id, u32 index),
- TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
-);
-
-DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map_err,
- TP_PROTO(const struct net_device *dev,
- const struct bpf_prog *xdp,
- const void *tgt, int err,
- enum bpf_map_type map_type,
- u32 map_id, u32 index),
- TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
-);
-
+#ifdef CONFIG_BPF_SYSCALL
TRACE_EVENT(xdp_cpumap_kthread,
TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
@@ -300,6 +282,7 @@ TRACE_EVENT(xdp_devmap_xmit,
__entry->sent, __entry->drops,
__entry->err)
);
+#endif /* CONFIG_BPF_SYSCALL */
/* Expect users already include <net/xdp.h>, but not xdp_priv.h */
#include <net/xdp_priv.h>
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index f333a0ac4ee4..53b5a8c002b1 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -147,6 +147,9 @@
#define SO_PASSRIGHTS 83
+#define SO_INQ 84
+#define SCM_INQ SO_INQ
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
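
A hedged usage sketch for the new SO_INQ/SCM_INQ pair (editorial, not part of the patch): the semantics are assumed to mirror TCP_INQ, i.e. once SO_INQ is enabled each recvmsg() carries an SCM_INQ control message with the number of bytes still queued; the SOL_SOCKET cmsg level and int payload are assumptions.

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

#ifndef SO_INQ
#define SO_INQ	84
#define SCM_INQ	SO_INQ
#endif

/* Read from a socket; *inq is set to the assumed remaining queued bytes. */
static ssize_t recv_with_inq(int fd, void *buf, size_t len, int *inq)
{
	char cbuf[CMSG_SPACE(sizeof(int))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	int one = 1;
	ssize_t n;

	/* Opt in once per socket; assumed to be a boolean option. */
	setsockopt(fd, SOL_SOCKET, SO_INQ, &one, sizeof(one));

	n = recvmsg(fd, &msg, 0);
	*inq = -1;
	for (struct cmsghdr *c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
		if (c->cmsg_level == SOL_SOCKET && c->cmsg_type == SCM_INQ)
			memcpy(inq, CMSG_DATA(c), sizeof(*inq));
	return n;
}
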
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 45c4fa13499c..bdedbaccf776 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -1493,6 +1493,8 @@ struct drm_amdgpu_info_hw_ip {
__u32 available_rings;
/** version info: bits 23:16 major, 15:8 minor, 7:0 revision */
__u32 ip_discovery_version;
+ /* Userq available slots */
+ __u32 userq_num_slots;
};
/* GFX metadata BO sizes and alignment info (in bytes) */
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 81202a50dc9e..ea91aa8afde9 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -210,6 +210,10 @@ extern "C" {
#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
+/* 48 bpp RGB */
+#define DRM_FORMAT_RGB161616 fourcc_code('R', 'G', '4', '8') /* [47:0] R:G:B 16:16:16 little endian */
+#define DRM_FORMAT_BGR161616 fourcc_code('B', 'G', '4', '8') /* [47:0] B:G:R 16:16:16 little endian */
+
/* 64 bpp RGB */
#define DRM_FORMAT_XRGB16161616 fourcc_code('X', 'R', '4', '8') /* [63:0] x:R:G:B 16:16:16:16 little endian */
#define DRM_FORMAT_XBGR16161616 fourcc_code('X', 'B', '4', '8') /* [63:0] x:B:G:R 16:16:16:16 little endian */
@@ -218,7 +222,7 @@ extern "C" {
#define DRM_FORMAT_ABGR16161616 fourcc_code('A', 'B', '4', '8') /* [63:0] A:B:G:R 16:16:16:16 little endian */
/*
- * Floating point 64bpp RGB
+ * Half-Floating point - 16b/component
* IEEE 754-2008 binary16 half-precision float
* [15:0] sign:exponent:mantissa 1:5:10
*/
@@ -228,6 +232,20 @@ extern "C" {
#define DRM_FORMAT_ARGB16161616F fourcc_code('A', 'R', '4', 'H') /* [63:0] A:R:G:B 16:16:16:16 little endian */
#define DRM_FORMAT_ABGR16161616F fourcc_code('A', 'B', '4', 'H') /* [63:0] A:B:G:R 16:16:16:16 little endian */
+#define DRM_FORMAT_R16F fourcc_code('R', ' ', ' ', 'H') /* [15:0] R 16 little endian */
+#define DRM_FORMAT_GR1616F fourcc_code('G', 'R', ' ', 'H') /* [31:0] G:R 16:16 little endian */
+#define DRM_FORMAT_BGR161616F fourcc_code('B', 'G', 'R', 'H') /* [47:0] B:G:R 16:16:16 little endian */
+
+/*
+ * Floating point - 32b/component
+ * IEEE 754-2008 binary32 float
+ * [31:0] sign:exponent:mantissa 1:8:23
+ */
+#define DRM_FORMAT_R32F fourcc_code('R', ' ', ' ', 'F') /* [31:0] R 32 little endian */
+#define DRM_FORMAT_GR3232F fourcc_code('G', 'R', ' ', 'F') /* [63:0] R:G 32:32 little endian */
+#define DRM_FORMAT_BGR323232F fourcc_code('B', 'G', 'R', 'F') /* [95:0] R:G:B 32:32:32 little endian */
+#define DRM_FORMAT_ABGR32323232F fourcc_code('A', 'B', '8', 'F') /* [127:0] R:G:B:A 32:32:32:32 little endian */
+
/*
* RGBA format with 10-bit components packed in 64-bit per pixel, with 6 bits
* of unused padding per component:
@@ -378,6 +396,42 @@ extern "C" {
#define DRM_FORMAT_Q401 fourcc_code('Q', '4', '0', '1')
/*
+ * 3 plane YCbCr LSB aligned
+ * In order to use these formats in a similar fashion to MSB-aligned ones,
+ * implementations can multiply the values by 2^6=64. For that reason the
+ * padding must only contain zeros.
+ * index 0 = Y plane, [15:0] z:Y [6:10] little endian
+ * index 1 = Cr plane, [15:0] z:Cr [6:10] little endian
+ * index 2 = Cb plane, [15:0] z:Cb [6:10] little endian
+ */
+#define DRM_FORMAT_S010 fourcc_code('S', '0', '1', '0') /* 2x2 subsampled Cb (1) and Cr (2) planes 10 bits per channel */
+#define DRM_FORMAT_S210 fourcc_code('S', '2', '1', '0') /* 2x1 subsampled Cb (1) and Cr (2) planes 10 bits per channel */
+#define DRM_FORMAT_S410 fourcc_code('S', '4', '1', '0') /* non-subsampled Cb (1) and Cr (2) planes 10 bits per channel */
+
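
Editorial sketch (not part of the patch): the scaling described in the comment above is simply a left shift by 6, which is safe precisely because the padding bits are required to be zero.

#include <stdint.h>

/* Convert an LSB-aligned 10-bit S010 sample into an MSB-aligned
 * (P010-style) 16-bit value by multiplying by 2^6 = 64. */
static inline uint16_t s010_to_msb_aligned(uint16_t lsb_aligned_sample)
{
	/* The upper 6 padding bits must be zero, so the shift cannot overflow. */
	return (uint16_t)(lsb_aligned_sample << 6);
}
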
+/*
+ * 3 plane YCbCr LSB aligned
+ * In order to use these formats in a similar fashion to MSB aligned ones
+ * implementation can multiply the values by 2^4=16. For that reason the padding
+ * must only contain zeros.
+ * index 0 = Y plane, [15:0] z:Y [4:12] little endian
+ * index 1 = Cr plane, [15:0] z:Cr [4:12] little endian
+ * index 2 = Cb plane, [15:0] z:Cb [4:12] little endian
+ */
+#define DRM_FORMAT_S012 fourcc_code('S', '0', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes 12 bits per channel */
+#define DRM_FORMAT_S212 fourcc_code('S', '2', '1', '2') /* 2x1 subsampled Cb (1) and Cr (2) planes 12 bits per channel */
+#define DRM_FORMAT_S412 fourcc_code('S', '4', '1', '2') /* non-subsampled Cb (1) and Cr (2) planes 12 bits per channel */
+
+/*
+ * 3 plane YCbCr
+ * index 0 = Y plane, [15:0] Y little endian
+ * index 1 = Cr plane, [15:0] Cr little endian
+ * index 2 = Cb plane, [15:0] Cb little endian
+ */
+#define DRM_FORMAT_S016 fourcc_code('S', '0', '1', '6') /* 2x2 subsampled Cb (1) and Cr (2) planes 16 bits per channel */
+#define DRM_FORMAT_S216 fourcc_code('S', '2', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes 16 bits per channel */
+#define DRM_FORMAT_S416 fourcc_code('S', '4', '1', '6') /* non-subsampled Cb (1) and Cr (2) planes 16 bits per channel */
+
+/*
* 3 plane YCbCr
* index 0: Y plane, [7:0] Y
* index 1: Cb plane, [7:0] Cb
diff --git a/include/uapi/drm/ivpu_accel.h b/include/uapi/drm/ivpu_accel.h
index 2f24103f4533..160ee1411d4a 100644
--- a/include/uapi/drm/ivpu_accel.h
+++ b/include/uapi/drm/ivpu_accel.h
@@ -445,6 +445,9 @@ struct drm_ivpu_metric_streamer_get_data {
__u64 data_size;
};
+/* Command queue flags */
+#define DRM_IVPU_CMDQ_FLAG_TURBO 0x00000001
+
/**
* struct drm_ivpu_cmdq_create - Create command queue for job submission
*/
@@ -462,6 +465,17 @@ struct drm_ivpu_cmdq_create {
* %DRM_IVPU_JOB_PRIORITY_REALTIME
*/
__u32 priority;
+ /**
+ * @flags:
+ *
+ * Supported flags:
+ *
+ * %DRM_IVPU_CMDQ_FLAG_TURBO
+ *
+ * Enable low-latency mode for the command queue. The NPU will maximize performance
+ * when executing jobs from such a queue, at the cost of increased power usage.
+ */
+ __u32 flags;
};
/**
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 2342cb90857e..5c67294edc95 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -91,6 +91,32 @@ struct drm_msm_timespec {
#define MSM_PARAM_UBWC_SWIZZLE 0x12 /* RO */
#define MSM_PARAM_MACROTILE_MODE 0x13 /* RO */
#define MSM_PARAM_UCHE_TRAP_BASE 0x14 /* RO */
+/* PRR (Partially Resident Region) is required for sparse residency: */
+#define MSM_PARAM_HAS_PRR 0x15 /* RO */
+/* MSM_PARAM_EN_VM_BIND is set to 1 to enable VM_BIND ops.
+ *
+ * With VM_BIND enabled, userspace is required to allocate iova and use the
+ * VM_BIND ops for map/unmap ioctls. MSM_INFO_SET_IOVA and MSM_INFO_GET_IOVA
+ * will be rejected. (The latter does not have a sensible meaning when a BO
+ * can have multiple and/or partial mappings.)
+ *
+ * With VM_BIND enabled, userspace does not include a submit_bo table in the
+ * SUBMIT ioctl (this will be rejected); the resident set is determined by
+ * the VM_BIND ops.
+ *
+ * Enabling VM_BIND will fail on devices which do not have per-process pgtables.
+ * And it is not allowed to disable VM_BIND once it has been enabled.
+ *
+ * Enabling VM_BIND should be done (attempted) prior to allocating any BOs or
+ * submitqueues of type MSM_SUBMITQUEUE_VM_BIND.
+ *
+ * Relatedly, when VM_BIND mode is enabled, the kernel will not try to recover
+ * from GPU faults or failed async VM_BIND ops, in particular because it is
+ * difficult to communicate to userspace which op failed so that userspace
+ * could rewind and try again. When the VM is marked unusable, the SUBMIT
+ * ioctl will return -EPIPE.
+ */
+#define MSM_PARAM_EN_VM_BIND 0x16 /* WO, once */
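
A hedged sketch of opting into VM_BIND mode as described above, using the existing SET_PARAM ioctl; the drm_msm_param field names (pipe/param/value) and the MSM_PIPE_3D0 id are assumed from the pre-existing header, not shown in this hunk.

#include <string.h>
#include <sys/ioctl.h>
#include "drm/msm_drm.h"

/* Enable VM_BIND on a freshly-opened drm_file, before any BO allocations. */
static int msm_enable_vm_bind(int drm_fd)
{
	struct drm_msm_param req;

	memset(&req, 0, sizeof(req));
	req.pipe = MSM_PIPE_3D0;
	req.param = MSM_PARAM_EN_VM_BIND;	/* write-once */
	req.value = 1;

	/* Expected to fail on devices without per-process pgtables. */
	return ioctl(drm_fd, DRM_IOCTL_MSM_SET_PARAM, &req);
}
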
/* For backwards compat. The original support for preemption was based on
* a single ring per priority level so # of priority levels equals the #
@@ -114,6 +140,19 @@ struct drm_msm_param {
#define MSM_BO_SCANOUT 0x00000001 /* scanout capable */
#define MSM_BO_GPU_READONLY 0x00000002
+/* Private buffers do not need to be explicitly listed in the SUBMIT
+ * ioctl, unless referenced by a drm_msm_gem_submit_cmd. Private
+ * buffers may NOT be imported/exported or used for scanout (or any
+ * other situation where buffers can be indefinitely pinned, but
+ * cases other than scanout are all kernel owned BOs which are not
+ * visible to userspace).
+ *
+ * In exchange for those constraints, all private BOs associated with
+ * a single context (drm_file) share a single dma_resv, and if there
+ * has been no eviction since the last submit, there is no per-BO
+ * bookkeeping to do, significantly cutting the SUBMIT overhead.
+ */
+#define MSM_BO_NO_SHARE 0x00000004
#define MSM_BO_CACHE_MASK 0x000f0000
/* cache modes */
#define MSM_BO_CACHED 0x00010000
@@ -123,6 +162,7 @@ struct drm_msm_param {
#define MSM_BO_FLAGS (MSM_BO_SCANOUT | \
MSM_BO_GPU_READONLY | \
+ MSM_BO_NO_SHARE | \
MSM_BO_CACHE_MASK)
struct drm_msm_gem_new {
@@ -180,6 +220,17 @@ struct drm_msm_gem_cpu_fini {
* Cmdstream Submission:
*/
+#define MSM_SYNCOBJ_RESET 0x00000001 /* Reset syncobj after wait. */
+#define MSM_SYNCOBJ_FLAGS ( \
+ MSM_SYNCOBJ_RESET | \
+ 0)
+
+struct drm_msm_syncobj {
+ __u32 handle; /* in, syncobj handle. */
+	__u32 flags;          /* in, from MSM_SYNCOBJ_FLAGS */
+ __u64 point; /* in, timepoint for timeline syncobjs. */
+};
+
/* The value written into the cmdstream is logically:
*
* ((relocbuf->gpuaddr + reloc_offset) << shift) | or
@@ -221,7 +272,10 @@ struct drm_msm_gem_submit_cmd {
__u32 size; /* in, cmdstream size */
__u32 pad;
__u32 nr_relocs; /* in, number of submit_reloc's */
- __u64 relocs; /* in, ptr to array of submit_reloc's */
+ union {
+ __u64 relocs; /* in, ptr to array of submit_reloc's */
+ __u64 iova; /* cmdstream address (for VM_BIND contexts) */
+ };
};
/* Each buffer referenced elsewhere in the cmdstream submit (ie. the
@@ -269,17 +323,6 @@ struct drm_msm_gem_submit_bo {
MSM_SUBMIT_FENCE_SN_IN | \
0)
-#define MSM_SUBMIT_SYNCOBJ_RESET 0x00000001 /* Reset syncobj after wait. */
-#define MSM_SUBMIT_SYNCOBJ_FLAGS ( \
- MSM_SUBMIT_SYNCOBJ_RESET | \
- 0)
-
-struct drm_msm_gem_submit_syncobj {
- __u32 handle; /* in, syncobj handle. */
- __u32 flags; /* in, from MSM_SUBMIT_SYNCOBJ_FLAGS */
- __u64 point; /* in, timepoint for timeline syncobjs. */
-};
-
/* Each cmdstream submit consists of a table of buffers involved, and
* one or more cmdstream buffers. This allows for conditional execution
* (context-restore), and IB buffers needed for per tile/bin draw cmds.
@@ -293,13 +336,80 @@ struct drm_msm_gem_submit {
__u64 cmds; /* in, ptr to array of submit_cmd's */
__s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
__u32 queueid; /* in, submitqueue id */
- __u64 in_syncobjs; /* in, ptr to array of drm_msm_gem_submit_syncobj */
- __u64 out_syncobjs; /* in, ptr to array of drm_msm_gem_submit_syncobj */
+ __u64 in_syncobjs; /* in, ptr to array of drm_msm_syncobj */
+ __u64 out_syncobjs; /* in, ptr to array of drm_msm_syncobj */
__u32 nr_in_syncobjs; /* in, number of entries in in_syncobj */
__u32 nr_out_syncobjs; /* in, number of entries in out_syncobj. */
__u32 syncobj_stride; /* in, stride of syncobj arrays. */
__u32 pad; /*in, reserved for future use, always 0. */
+};
+#define MSM_VM_BIND_OP_UNMAP 0
+#define MSM_VM_BIND_OP_MAP 1
+#define MSM_VM_BIND_OP_MAP_NULL 2
+
+#define MSM_VM_BIND_OP_DUMP 1
+#define MSM_VM_BIND_OP_FLAGS ( \
+ MSM_VM_BIND_OP_DUMP | \
+ 0)
+
+/**
+ * struct drm_msm_vm_bind_op - bind/unbind op to run
+ */
+struct drm_msm_vm_bind_op {
+ /** @op: one of MSM_VM_BIND_OP_x */
+ __u32 op;
+ /** @handle: GEM object handle, MBZ for UNMAP or MAP_NULL */
+ __u32 handle;
+ /** @obj_offset: Offset into GEM object, MBZ for UNMAP or MAP_NULL */
+ __u64 obj_offset;
+ /** @iova: Address to operate on */
+ __u64 iova;
+	/** @range: Number of bytes to map/unmap */
+ __u64 range;
+ /** @flags: Bitmask of MSM_VM_BIND_OP_FLAG_x */
+ __u32 flags;
+ /** @pad: MBZ */
+ __u32 pad;
+};
+
+#define MSM_VM_BIND_FENCE_FD_IN 0x00000001
+#define MSM_VM_BIND_FENCE_FD_OUT 0x00000002
+#define MSM_VM_BIND_FLAGS ( \
+ MSM_VM_BIND_FENCE_FD_IN | \
+ MSM_VM_BIND_FENCE_FD_OUT | \
+ 0)
+
+/**
+ * struct drm_msm_vm_bind - Input of &DRM_IOCTL_MSM_VM_BIND
+ */
+struct drm_msm_vm_bind {
+ /** @flags: in, bitmask of MSM_VM_BIND_x */
+ __u32 flags;
+ /** @nr_ops: the number of bind ops in this ioctl */
+ __u32 nr_ops;
+ /** @fence_fd: in/out fence fd (see MSM_VM_BIND_FENCE_FD_IN/OUT) */
+ __s32 fence_fd;
+ /** @queue_id: in, submitqueue id */
+ __u32 queue_id;
+	/** @in_syncobjs: in, ptr to array of drm_msm_syncobj */
+ __u64 in_syncobjs;
+	/** @out_syncobjs: in, ptr to array of drm_msm_syncobj */
+ __u64 out_syncobjs;
+ /** @nr_in_syncobjs: in, number of entries in in_syncobj */
+ __u32 nr_in_syncobjs;
+ /** @nr_out_syncobjs: in, number of entries in out_syncobj */
+ __u32 nr_out_syncobjs;
+ /** @syncobj_stride: in, stride of syncobj arrays */
+ __u32 syncobj_stride;
+ /** @op_stride: sizeof each struct drm_msm_vm_bind_op in @ops */
+ __u32 op_stride;
+ union {
+		/** @op: used if nr_ops == 1 */
+ struct drm_msm_vm_bind_op op;
+		/** @ops: userptr to array of drm_msm_vm_bind_op if nr_ops > 1 */
+ __u64 ops;
+ };
};
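
Illustrative sketch only (not part of the patch): a single synchronous MAP op submitted through the new VM_BIND ioctl on a queue created with MSM_SUBMITQUEUE_VM_BIND. All names are taken from this header; error handling is trimmed.

#include <string.h>
#include <sys/ioctl.h>
#include "drm/msm_drm.h"

/* Map one BO into the GPU VM at the given iova. */
static int msm_vm_bind_map(int drm_fd, __u32 vm_queue_id, __u32 bo_handle,
			   __u64 iova, __u64 size)
{
	struct drm_msm_vm_bind bind;

	memset(&bind, 0, sizeof(bind));
	bind.flags = 0;			/* no in/out fence fd */
	bind.nr_ops = 1;		/* inline op, no userptr array needed */
	bind.queue_id = vm_queue_id;
	bind.op.op = MSM_VM_BIND_OP_MAP;
	bind.op.handle = bo_handle;
	bind.op.obj_offset = 0;
	bind.op.iova = iova;
	bind.op.range = size;

	return ioctl(drm_fd, DRM_IOCTL_MSM_VM_BIND, &bind);
}
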
#define MSM_WAIT_FENCE_BOOST 0x00000001
@@ -345,12 +455,19 @@ struct drm_msm_gem_madvise {
/*
* Draw queues allow the user to set specific submission parameter. Command
* submissions specify a specific submitqueue to use. ID 0 is reserved for
- * backwards compatibility as a "default" submitqueue
+ * backwards compatibility as a "default" submitqueue.
+ *
+ * Because VM_BIND async updates happen on the CPU, they must run on a
+ * virtual queue created with the flag MSM_SUBMITQUEUE_VM_BIND. If we had
+ * a way to do pgtable updates on the GPU, we could drop this restriction.
*/
#define MSM_SUBMITQUEUE_ALLOW_PREEMPT 0x00000001
+#define MSM_SUBMITQUEUE_VM_BIND 0x00000002 /* virtual queue for VM_BIND ops */
+
#define MSM_SUBMITQUEUE_FLAGS ( \
MSM_SUBMITQUEUE_ALLOW_PREEMPT | \
+ MSM_SUBMITQUEUE_VM_BIND | \
0)
/*
@@ -388,6 +505,7 @@ struct drm_msm_submitqueue_query {
#define DRM_MSM_SUBMITQUEUE_NEW 0x0A
#define DRM_MSM_SUBMITQUEUE_CLOSE 0x0B
#define DRM_MSM_SUBMITQUEUE_QUERY 0x0C
+#define DRM_MSM_VM_BIND 0x0D
#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
#define DRM_IOCTL_MSM_SET_PARAM DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SET_PARAM, struct drm_msm_param)
@@ -401,6 +519,7 @@ struct drm_msm_submitqueue_query {
#define DRM_IOCTL_MSM_SUBMITQUEUE_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_NEW, struct drm_msm_submitqueue)
#define DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_CLOSE, __u32)
#define DRM_IOCTL_MSM_SUBMITQUEUE_QUERY DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_QUERY, struct drm_msm_submitqueue_query)
+#define DRM_IOCTL_MSM_VM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_VM_BIND, struct drm_msm_vm_bind)
#if defined(__cplusplus)
}
diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h
index 568724be6628..ed67510395bd 100644
--- a/include/uapi/drm/panfrost_drm.h
+++ b/include/uapi/drm/panfrost_drm.h
@@ -21,6 +21,7 @@ extern "C" {
#define DRM_PANFROST_PERFCNT_ENABLE 0x06
#define DRM_PANFROST_PERFCNT_DUMP 0x07
#define DRM_PANFROST_MADVISE 0x08
+#define DRM_PANFROST_SET_LABEL_BO 0x09
#define DRM_IOCTL_PANFROST_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_SUBMIT, struct drm_panfrost_submit)
#define DRM_IOCTL_PANFROST_WAIT_BO DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_WAIT_BO, struct drm_panfrost_wait_bo)
@@ -29,6 +30,7 @@ extern "C" {
#define DRM_IOCTL_PANFROST_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_PARAM, struct drm_panfrost_get_param)
#define DRM_IOCTL_PANFROST_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_BO_OFFSET, struct drm_panfrost_get_bo_offset)
#define DRM_IOCTL_PANFROST_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MADVISE, struct drm_panfrost_madvise)
+#define DRM_IOCTL_PANFROST_SET_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_SET_LABEL_BO, struct drm_panfrost_set_label_bo)
/*
* Unstable ioctl(s): only exposed when the unsafe unstable_ioctls module
@@ -227,6 +229,25 @@ struct drm_panfrost_madvise {
__u32 retained; /* out, whether backing store still exists */
};
+/**
+ * struct drm_panfrost_set_label_bo - ioctl argument for labelling Panfrost BOs.
+ */
+struct drm_panfrost_set_label_bo {
+ /** @handle: Handle of the buffer object to label. */
+ __u32 handle;
+
+ /** @pad: MBZ. */
+ __u32 pad;
+
+ /**
+ * @label: User pointer to a NUL-terminated string
+ *
+ * Length cannot be greater than 4096.
+ * NULL is permitted and means clear the label.
+ */
+ __u64 label;
+};
+
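
A short usage sketch for the new labelling ioctl (editorial illustration); all identifiers come from the header above.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "drm/panfrost_drm.h"

/* Attach a debug label to a Panfrost BO; pass NULL to clear the label. */
static int panfrost_label_bo(int drm_fd, uint32_t handle, const char *label)
{
	struct drm_panfrost_set_label_bo req;

	memset(&req, 0, sizeof(req));
	req.handle = handle;
	/* NUL-terminated string no longer than 4096 bytes, or NULL to clear. */
	req.label = (uint64_t)(uintptr_t)label;

	return ioctl(drm_fd, DRM_IOCTL_PANFROST_SET_LABEL_BO, &req);
}
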
/* Definitions for coredump decoding in user space */
#define PANFROSTDUMP_MAJOR 1
#define PANFROSTDUMP_MINOR 0
diff --git a/include/uapi/drm/panthor_drm.h b/include/uapi/drm/panthor_drm.h
index ad9a70afea6c..e1f43deb7eca 100644
--- a/include/uapi/drm/panthor_drm.h
+++ b/include/uapi/drm/panthor_drm.h
@@ -130,6 +130,20 @@ enum drm_panthor_ioctl_id {
/** @DRM_PANTHOR_BO_SET_LABEL: Label a BO. */
DRM_PANTHOR_BO_SET_LABEL,
+
+ /**
+ * @DRM_PANTHOR_SET_USER_MMIO_OFFSET: Set the offset to use as the user MMIO offset.
+ *
+ * The default behavior is to pick the MMIO offset based on the size of the pgoff_t
+ * type seen by the process that manipulates the FD, such that a 32-bit process can
+ * always map the user MMIO ranges. But this approach doesn't work well for emulators
+	 * like FEX, where the emulator is a 64-bit binary which might be executing 32-bit
+	 * code. In that case, the kernel thinks it's a 64-bit process and assumes
+ * DRM_PANTHOR_USER_MMIO_OFFSET_64BIT is in use, but the UMD library expects
+ * DRM_PANTHOR_USER_MMIO_OFFSET_32BIT, because it can't mmap() anything above the
+ * pgoff_t size.
+ */
+ DRM_PANTHOR_SET_USER_MMIO_OFFSET,
};
/**
@@ -296,6 +310,9 @@ struct drm_panthor_gpu_info {
/** @as_present: Bitmask encoding the number of address-space exposed by the MMU. */
__u32 as_present;
+ /** @pad0: MBZ. */
+ __u32 pad0;
+
/** @shader_present: Bitmask encoding the shader cores exposed by the GPU. */
__u64 shader_present;
@@ -999,6 +1016,28 @@ struct drm_panthor_bo_set_label {
};
/**
+ * struct drm_panthor_set_user_mmio_offset - Arguments passed to
+ * DRM_IOCTL_PANTHOR_SET_USER_MMIO_OFFSET
+ *
+ * This ioctl is only really useful if you want to support userspace
+ * CPU emulation environments where the size of an unsigned long differs
+ * between the host and the guest architectures.
+ */
+struct drm_panthor_set_user_mmio_offset {
+ /**
+ * @offset: User MMIO offset to use.
+ *
+ * Must be either DRM_PANTHOR_USER_MMIO_OFFSET_32BIT or
+ * DRM_PANTHOR_USER_MMIO_OFFSET_64BIT.
+ *
+ * Use DRM_PANTHOR_USER_MMIO_OFFSET (which selects OFFSET_32BIT or
+ * OFFSET_64BIT based on the size of an unsigned long) unless you
+ * have a very good reason to overrule this decision.
+ */
+ __u64 offset;
+};
+
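
A minimal sketch of the emulator use case described above (editorial, not part of the patch), assuming a 64-bit FEX-style emulator running 32-bit guest code; the DRM_PANTHOR_USER_MMIO_OFFSET_32BIT constant is the one referenced in the kerneldoc.

#include <sys/ioctl.h>
#include "drm/panthor_drm.h"

/* Force the 32-bit user MMIO offset so the guest UMD can mmap() the range. */
static int panthor_force_32bit_mmio(int drm_fd)
{
	struct drm_panthor_set_user_mmio_offset arg = {
		.offset = DRM_PANTHOR_USER_MMIO_OFFSET_32BIT,
	};

	return ioctl(drm_fd, DRM_IOCTL_PANTHOR_SET_USER_MMIO_OFFSET, &arg);
}
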
+/**
* DRM_IOCTL_PANTHOR() - Build a Panthor IOCTL number
* @__access: Access type. Must be R, W or RW.
* @__id: One of the DRM_PANTHOR_xxx id.
@@ -1042,6 +1081,8 @@ enum {
DRM_IOCTL_PANTHOR(WR, TILER_HEAP_DESTROY, tiler_heap_destroy),
DRM_IOCTL_PANTHOR_BO_SET_LABEL =
DRM_IOCTL_PANTHOR(WR, BO_SET_LABEL, bo_set_label),
+ DRM_IOCTL_PANTHOR_SET_USER_MMIO_OFFSET =
+ DRM_IOCTL_PANTHOR(WR, SET_USER_MMIO_OFFSET, set_user_mmio_offset),
};
#if defined(__cplusplus)
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index 6a702ba7817c..e2426413488f 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -925,9 +925,9 @@ struct drm_xe_gem_mmap_offset {
* - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running VM accepts
* exec submissions to its exec_queues that don't have an upper time
* limit on the job execution time. But exec submissions to these
- * don't allow any of the flags DRM_XE_SYNC_FLAG_SYNCOBJ,
- * DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ, DRM_XE_SYNC_FLAG_DMA_BUF,
- * used as out-syncobjs, that is, together with DRM_XE_SYNC_FLAG_SIGNAL.
+ * don't allow any of the sync types DRM_XE_SYNC_TYPE_SYNCOBJ,
+ * DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ, used as out-syncobjs, that is,
+ * together with sync flag DRM_XE_SYNC_FLAG_SIGNAL.
* LR VMs can be created in recoverable page-fault mode using
* DRM_XE_VM_CREATE_FLAG_FAULT_MODE, if the device supports it.
* If that flag is omitted, the UMD can not rely on the slightly
@@ -1394,7 +1394,7 @@ struct drm_xe_sync {
/**
* @timeline_value: Input for the timeline sync object. Needs to be
- * different than 0 when used with %DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ.
+ * different than 0 when used with %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ.
*/
__u64 timeline_value;
@@ -1617,6 +1617,9 @@ enum drm_xe_oa_unit_type {
/** @DRM_XE_OA_UNIT_TYPE_OAM: OAM OA unit */
DRM_XE_OA_UNIT_TYPE_OAM,
+
+ /** @DRM_XE_OA_UNIT_TYPE_OAM_SAG: OAM_SAG OA unit */
+ DRM_XE_OA_UNIT_TYPE_OAM_SAG,
};
/**
@@ -1638,6 +1641,7 @@ struct drm_xe_oa_unit {
#define DRM_XE_OA_CAPS_SYNCS (1 << 1)
#define DRM_XE_OA_CAPS_OA_BUFFER_SIZE (1 << 2)
#define DRM_XE_OA_CAPS_WAIT_NUM_REPORTS (1 << 3)
+#define DRM_XE_OA_CAPS_OAM (1 << 4)
/** @oa_timestamp_freq: OA timestamp freq */
__u64 oa_timestamp_freq;
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 0b4a2f124d11..233de8677382 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -450,6 +450,7 @@ union bpf_iter_link_info {
* * **struct bpf_map_info**
* * **struct bpf_btf_info**
* * **struct bpf_link_info**
+ * * **struct bpf_token_info**
*
* Return
* Returns zero on success. On error, -1 is returned and *errno*
@@ -906,6 +907,17 @@ union bpf_iter_link_info {
* A new file descriptor (a nonnegative integer), or -1 if an
* error occurred (in which case, *errno* is set appropriately).
*
+ * BPF_PROG_STREAM_READ_BY_FD
+ * Description
+ * Read data of a program's BPF stream. The program is identified
+ * by *prog_fd*, and the stream is identified by the *stream_id*.
+ *		The data is copied into the buffer pointed to by *stream_buf*;
+ *		at most *stream_buf_len* bytes are filled.
+ *
+ * Return
+ * Number of bytes read from the stream on success, or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
* NOTES
* eBPF objects (maps and programs) can be shared between processes.
*
@@ -961,6 +973,7 @@ enum bpf_cmd {
BPF_LINK_DETACH,
BPF_PROG_BIND_MAP,
BPF_TOKEN_CREATE,
+ BPF_PROG_STREAM_READ_BY_FD,
__MAX_BPF_CMD,
};
@@ -1463,6 +1476,11 @@ struct bpf_stack_build_id {
#define BPF_OBJ_NAME_LEN 16U
+enum {
+ BPF_STREAM_STDOUT = 1,
+ BPF_STREAM_STDERR = 2,
+};
+
union bpf_attr {
struct { /* anonymous struct used by BPF_MAP_CREATE command */
__u32 map_type; /* one of enum bpf_map_type */
@@ -1794,6 +1812,13 @@ union bpf_attr {
};
__u64 expected_revision;
} netkit;
+ struct {
+ union {
+ __u32 relative_fd;
+ __u32 relative_id;
+ };
+ __u64 expected_revision;
+ } cgroup;
};
} link_create;
@@ -1842,6 +1867,13 @@ union bpf_attr {
__u32 bpffs_fd;
} token_create;
+ struct {
+ __aligned_u64 stream_buf;
+ __u32 stream_buf_len;
+ __u32 stream_id;
+ __u32 prog_fd;
+ } prog_stream_read;
+
} __attribute__((aligned(8)));
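
A minimal sketch (editorial, not part of the patch) of draining a program's stdout stream with the new command via the raw bpf() syscall; the attribute and command names come from this header.

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Returns the number of bytes copied into buf, or -1 with errno set. */
static long prog_stream_read(int prog_fd, char *buf, __u32 len)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_stream_read.prog_fd = prog_fd;
	attr.prog_stream_read.stream_id = BPF_STREAM_STDOUT;
	attr.prog_stream_read.stream_buf = (__u64)(unsigned long)buf;
	attr.prog_stream_read.stream_buf_len = len;

	return syscall(__NR_bpf, BPF_PROG_STREAM_READ_BY_FD, &attr, sizeof(attr));
}
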
/* The description below is an attempt at providing documentation to eBPF
@@ -2403,7 +2435,7 @@ union bpf_attr {
* into it. An example is available in file
* *samples/bpf/trace_output_user.c* in the Linux kernel source
* tree (the eBPF program counterpart is in
- * *samples/bpf/trace_output_kern.c*).
+ * *samples/bpf/trace_output.bpf.c*).
*
* **bpf_perf_event_output**\ () achieves better performance
* than **bpf_trace_printk**\ () for sharing data with user
@@ -6653,11 +6685,15 @@ struct bpf_link_info {
struct {
__aligned_u64 tp_name; /* in/out: tp_name buffer ptr */
__u32 tp_name_len; /* in/out: tp_name buffer len */
+ __u32 :32;
+ __u64 cookie;
} raw_tracepoint;
struct {
__u32 attach_type;
__u32 target_obj_id; /* prog_id for PROG_EXT, otherwise btf object id */
__u32 target_btf_id; /* BTF type id inside the object */
+ __u32 :32;
+ __u64 cookie;
} tracing;
struct {
__u64 cgroup_id;
@@ -6768,6 +6804,13 @@ struct bpf_link_info {
};
} __attribute__((aligned(8)));
+struct bpf_token_info {
+ __u64 allowed_cmds;
+ __u64 allowed_maps;
+ __u64 allowed_progs;
+ __u64 allowed_attachs;
+} __attribute__((aligned(8)));
+
/* User bpf_sock_addr struct to access socket fields and sockaddr struct passed
* by user and intended to be used by socket (e.g. to bind to, depends on
* attach type).
diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h
index 2e21b5594f81..ea5a0899ecf0 100644
--- a/include/uapi/linux/capability.h
+++ b/include/uapi/linux/capability.h
@@ -6,9 +6,10 @@
* Alexander Kjeldaas <astor@guardian.no>
* with help from Aleph1, Roland Buresund and Andrew Main.
*
- * See here for the libcap library ("POSIX draft" compliance):
+ * See here for the libcap2 library (compliant with Section 25 of
+ * the withdrawn POSIX 1003.1e Draft 17):
*
- * ftp://www.kernel.org/pub/linux/libs/security/linux-privs/kernel-2.6/
+ * https://www.kernel.org/pub/linux/libs/security/linux-privs/libcap2/
*/
#ifndef _UAPI_LINUX_CAPABILITY_H
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index a5ee0f13740a..9fcb25a0f447 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -221,6 +221,11 @@ enum devlink_port_flavour {
*/
};
+/* IEEE 802.1Qaz standard supported values. */
+
+#define DEVLINK_RATE_TCS_MAX 8
+#define DEVLINK_RATE_TC_INDEX_MAX (DEVLINK_RATE_TCS_MAX - 1)
+
enum devlink_rate_type {
DEVLINK_RATE_TYPE_LEAF,
DEVLINK_RATE_TYPE_NODE,
@@ -629,6 +634,8 @@ enum devlink_attr {
DEVLINK_ATTR_REGION_DIRECT, /* flag */
+ DEVLINK_ATTR_RATE_TC_BWS, /* nested */
+
/* Add new attributes above here, update the spec in
* Documentation/netlink/specs/devlink.yaml and re-generate
* net/devlink/netlink_gen.c.
@@ -638,6 +645,15 @@ enum devlink_attr {
DEVLINK_ATTR_MAX = __DEVLINK_ATTR_MAX - 1
};
+enum devlink_rate_tc_attr {
+ DEVLINK_RATE_TC_ATTR_UNSPEC,
+ DEVLINK_RATE_TC_ATTR_INDEX, /* u8 */
+ DEVLINK_RATE_TC_ATTR_BW, /* u32 */
+
+ __DEVLINK_RATE_TC_ATTR_MAX,
+ DEVLINK_RATE_TC_ATTR_MAX = __DEVLINK_RATE_TC_ATTR_MAX - 1
+};
+
/* Mapping between internal resource described by the field and system
* structure
*/
diff --git a/include/uapi/linux/dpll.h b/include/uapi/linux/dpll.h
index bf97d4b6d51f..37b438ce8efc 100644
--- a/include/uapi/linux/dpll.h
+++ b/include/uapi/linux/dpll.h
@@ -192,6 +192,17 @@ enum dpll_pin_capabilities {
#define DPLL_PHASE_OFFSET_DIVIDER 1000
+/**
+ * enum dpll_feature_state - Allow control (enable/disable) and status checking
+ * over features.
+ * @DPLL_FEATURE_STATE_DISABLE: feature shall be disabled
+ * @DPLL_FEATURE_STATE_ENABLE: feature shall be enabled
+ */
+enum dpll_feature_state {
+ DPLL_FEATURE_STATE_DISABLE,
+ DPLL_FEATURE_STATE_ENABLE,
+};
+
enum dpll_a {
DPLL_A_ID = 1,
DPLL_A_MODULE_NAME,
@@ -204,6 +215,7 @@ enum dpll_a {
DPLL_A_TYPE,
DPLL_A_LOCK_STATUS_ERROR,
DPLL_A_CLOCK_QUALITY_LEVEL,
+ DPLL_A_PHASE_OFFSET_MONITOR,
__DPLL_A_MAX,
DPLL_A_MAX = (__DPLL_A_MAX - 1)
@@ -237,6 +249,7 @@ enum dpll_a_pin {
DPLL_A_PIN_ESYNC_FREQUENCY,
DPLL_A_PIN_ESYNC_FREQUENCY_SUPPORTED,
DPLL_A_PIN_ESYNC_PULSE,
+ DPLL_A_PIN_REFERENCE_SYNC,
__DPLL_A_PIN_MAX,
DPLL_A_PIN_MAX = (__DPLL_A_PIN_MAX - 1)
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 707c1844010c..9e9afdd1238a 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -2314,7 +2314,7 @@ enum {
IPV6_USER_FLOW = 0x0e, /* spec only (usr_ip6_spec; nfc only) */
IPV4_FLOW = 0x10, /* hash only */
IPV6_FLOW = 0x11, /* hash only */
- ETHER_FLOW = 0x12, /* spec only (ether_spec) */
+ ETHER_FLOW = 0x12, /* hash or spec (ether_spec) */
/* Used for GTP-U IPv4 and IPv6.
* The format of GTP packets only includes
@@ -2371,7 +2371,7 @@ enum {
/* Flag to enable RSS spreading of traffic matching rule (nfc only) */
#define FLOW_RSS 0x20000000
-/* L3-L4 network traffic flow hash options */
+/* L2-L4 network traffic flow hash options */
#define RXH_L2DA (1 << 1)
#define RXH_VLAN (1 << 2)
#define RXH_L3_PROTO (1 << 3)
diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
index 09a75bdb6560..fa5d645140a4 100644
--- a/include/uapi/linux/ethtool_netlink.h
+++ b/include/uapi/linux/ethtool_netlink.h
@@ -208,6 +208,4 @@ enum {
ETHTOOL_A_STATS_PHY_MAX = (__ETHTOOL_A_STATS_PHY_CNT - 1)
};
-#define ETHTOOL_MCGRP_MONITOR_NAME "monitor"
-
#endif /* _UAPI_LINUX_ETHTOOL_NETLINK_H_ */
diff --git a/include/uapi/linux/ethtool_netlink_generated.h b/include/uapi/linux/ethtool_netlink_generated.h
index aa8ab5227c1e..e3b8813465d7 100644
--- a/include/uapi/linux/ethtool_netlink_generated.h
+++ b/include/uapi/linux/ethtool_netlink_generated.h
@@ -49,6 +49,34 @@ enum hwtstamp_source {
HWTSTAMP_SOURCE_PHYLIB,
};
+/**
+ * enum ethtool_pse_event - PSE event list for the PSE controller
+ * @ETHTOOL_PSE_EVENT_OVER_CURRENT: PSE output current is too high
+ * @ETHTOOL_PSE_EVENT_OVER_TEMP: PSE in over temperature state
+ * @ETHTOOL_C33_PSE_EVENT_DETECTION: detection process occurred on the PSE. IEEE
+ *	802.3-2022 33.2.5 and 145.2.6 PSE detection of PDs. IEEE 802.3-2022
+ *	30.9.1.1.5 aPSEPowerDetectionStatus
+ * @ETHTOOL_C33_PSE_EVENT_CLASSIFICATION: classification process occurred on the
+ * PSE. IEEE 802.3-2022 33.2.6 and 145.2.8 classification of PDs mutual
+ * identification. IEEE 802.3-2022 30.9.1.1.8 aPSEPowerClassification.
+ * @ETHTOOL_C33_PSE_EVENT_DISCONNECTION: PD has been disconnected on the PSE.
+ * IEEE 802.3-2022 33.3.8 and 145.3.9 PD Maintain Power Signature. IEEE
+ * 802.3-2022 33.5.1.2.9 MPS Absent. IEEE 802.3-2022 30.9.1.1.20
+ * aPSEMPSAbsentCounter.
+ * @ETHTOOL_PSE_EVENT_OVER_BUDGET: PSE turned off due to over budget situation
+ * @ETHTOOL_PSE_EVENT_SW_PW_CONTROL_ERROR: PSE faced an error managing the
+ * power control from software
+ */
+enum ethtool_pse_event {
+ ETHTOOL_PSE_EVENT_OVER_CURRENT = 1,
+ ETHTOOL_PSE_EVENT_OVER_TEMP = 2,
+ ETHTOOL_C33_PSE_EVENT_DETECTION = 4,
+ ETHTOOL_C33_PSE_EVENT_CLASSIFICATION = 8,
+ ETHTOOL_C33_PSE_EVENT_DISCONNECTION = 16,
+ ETHTOOL_PSE_EVENT_OVER_BUDGET = 32,
+ ETHTOOL_PSE_EVENT_SW_PW_CONTROL_ERROR = 64,
+};
+
enum {
ETHTOOL_A_HEADER_UNSPEC,
ETHTOOL_A_HEADER_DEV_INDEX,
@@ -642,12 +670,48 @@ enum {
ETHTOOL_A_C33_PSE_EXT_SUBSTATE,
ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT,
ETHTOOL_A_C33_PSE_PW_LIMIT_RANGES,
+ ETHTOOL_A_PSE_PW_D_ID,
+ ETHTOOL_A_PSE_PRIO_MAX,
+ ETHTOOL_A_PSE_PRIO,
__ETHTOOL_A_PSE_CNT,
ETHTOOL_A_PSE_MAX = (__ETHTOOL_A_PSE_CNT - 1)
};
enum {
+ ETHTOOL_A_FLOW_ETHER = 1,
+ ETHTOOL_A_FLOW_IP4,
+ ETHTOOL_A_FLOW_IP6,
+ ETHTOOL_A_FLOW_TCP4,
+ ETHTOOL_A_FLOW_TCP6,
+ ETHTOOL_A_FLOW_UDP4,
+ ETHTOOL_A_FLOW_UDP6,
+ ETHTOOL_A_FLOW_SCTP4,
+ ETHTOOL_A_FLOW_SCTP6,
+ ETHTOOL_A_FLOW_AH4,
+ ETHTOOL_A_FLOW_AH6,
+ ETHTOOL_A_FLOW_ESP4,
+ ETHTOOL_A_FLOW_ESP6,
+ ETHTOOL_A_FLOW_AH_ESP4,
+ ETHTOOL_A_FLOW_AH_ESP6,
+ ETHTOOL_A_FLOW_GTPU4,
+ ETHTOOL_A_FLOW_GTPU6,
+ ETHTOOL_A_FLOW_GTPC4,
+ ETHTOOL_A_FLOW_GTPC6,
+ ETHTOOL_A_FLOW_GTPC_TEID4,
+ ETHTOOL_A_FLOW_GTPC_TEID6,
+ ETHTOOL_A_FLOW_GTPU_EH4,
+ ETHTOOL_A_FLOW_GTPU_EH6,
+ ETHTOOL_A_FLOW_GTPU_UL4,
+ ETHTOOL_A_FLOW_GTPU_UL6,
+ ETHTOOL_A_FLOW_GTPU_DL4,
+ ETHTOOL_A_FLOW_GTPU_DL6,
+
+ __ETHTOOL_A_FLOW_CNT,
+ ETHTOOL_A_FLOW_MAX = (__ETHTOOL_A_FLOW_CNT - 1)
+};
+
+enum {
ETHTOOL_A_RSS_UNSPEC,
ETHTOOL_A_RSS_HEADER,
ETHTOOL_A_RSS_CONTEXT,
@@ -656,6 +720,7 @@ enum {
ETHTOOL_A_RSS_HKEY,
ETHTOOL_A_RSS_INPUT_XFRM,
ETHTOOL_A_RSS_START_CONTEXT,
+ ETHTOOL_A_RSS_FLOW_HASH,
__ETHTOOL_A_RSS_CNT,
ETHTOOL_A_RSS_MAX = (__ETHTOOL_A_RSS_CNT - 1)
@@ -719,6 +784,14 @@ enum {
};
enum {
+ ETHTOOL_A_PSE_NTF_HEADER = 1,
+ ETHTOOL_A_PSE_NTF_EVENTS,
+
+ __ETHTOOL_A_PSE_NTF_CNT,
+ ETHTOOL_A_PSE_NTF_MAX = (__ETHTOOL_A_PSE_NTF_CNT - 1)
+};
+
+enum {
ETHTOOL_MSG_USER_NONE = 0,
ETHTOOL_MSG_STRSET_GET = 1,
ETHTOOL_MSG_LINKINFO_GET,
@@ -767,6 +840,9 @@ enum {
ETHTOOL_MSG_PHY_GET,
ETHTOOL_MSG_TSCONFIG_GET,
ETHTOOL_MSG_TSCONFIG_SET,
+ ETHTOOL_MSG_RSS_SET,
+ ETHTOOL_MSG_RSS_CREATE_ACT,
+ ETHTOOL_MSG_RSS_DELETE_ACT,
__ETHTOOL_MSG_USER_CNT,
ETHTOOL_MSG_USER_MAX = (__ETHTOOL_MSG_USER_CNT - 1)
@@ -822,9 +898,16 @@ enum {
ETHTOOL_MSG_PHY_NTF,
ETHTOOL_MSG_TSCONFIG_GET_REPLY,
ETHTOOL_MSG_TSCONFIG_SET_REPLY,
+ ETHTOOL_MSG_PSE_NTF,
+ ETHTOOL_MSG_RSS_NTF,
+ ETHTOOL_MSG_RSS_CREATE_ACT_REPLY,
+ ETHTOOL_MSG_RSS_CREATE_NTF,
+ ETHTOOL_MSG_RSS_DELETE_NTF,
__ETHTOOL_MSG_KERNEL_CNT,
ETHTOOL_MSG_KERNEL_MAX = (__ETHTOOL_MSG_KERNEL_CNT - 1)
};
+#define ETHTOOL_MCGRP_MONITOR_NAME "monitor"
+
#endif /* _UAPI_LINUX_ETHTOOL_NETLINK_GENERATED_H */
diff --git a/include/uapi/linux/handshake.h b/include/uapi/linux/handshake.h
index 3d7ea58778c9..662e7de46c54 100644
--- a/include/uapi/linux/handshake.h
+++ b/include/uapi/linux/handshake.h
@@ -45,6 +45,7 @@ enum {
HANDSHAKE_A_ACCEPT_PEER_IDENTITY,
HANDSHAKE_A_ACCEPT_CERTIFICATE,
HANDSHAKE_A_ACCEPT_PEERNAME,
+ HANDSHAKE_A_ACCEPT_KEYRING,
__HANDSHAKE_A_ACCEPT_MAX,
HANDSHAKE_A_ACCEPT_MAX = (__HANDSHAKE_A_ACCEPT_MAX - 1)
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 3ad2d5d98034..784ace3a519c 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -1398,6 +1398,7 @@ enum {
IFLA_VXLAN_LOCALBYPASS,
IFLA_VXLAN_LABEL_POLICY, /* IPv6 flow label policy; ifla_vxlan_label_policy */
IFLA_VXLAN_RESERVED_BITS,
+ IFLA_VXLAN_MC_ROUTE,
__IFLA_VXLAN_MAX
};
#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
@@ -1534,6 +1535,7 @@ enum {
IFLA_BOND_MISSED_MAX,
IFLA_BOND_NS_IP6_TARGET,
IFLA_BOND_COUPLED_CONTROL,
+ IFLA_BOND_BROADCAST_NEIGH,
__IFLA_BOND_MAX,
};
diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h
index 287cdc81c939..79d53c7a1ebd 100644
--- a/include/uapi/linux/if_tun.h
+++ b/include/uapi/linux/if_tun.h
@@ -93,6 +93,15 @@
#define TUN_F_USO4 0x20 /* I can handle USO for IPv4 packets */
#define TUN_F_USO6 0x40 /* I can handle USO for IPv6 packets */
+/* I can handle TSO/USO for UDP tunneled packets */
+#define TUN_F_UDP_TUNNEL_GSO 0x080
+
+/*
+ * I can handle TSO/USO for UDP tunneled packets requiring csum offload for
+ * the outer header
+ */
+#define TUN_F_UDP_TUNNEL_GSO_CSUM 0x100
+
/* Protocol info prepended to the packets (when IFF_NO_PI is not set) */
#define TUN_PKT_STRIP 0x0001
struct tun_pi {
diff --git a/include/uapi/linux/if_xdp.h b/include/uapi/linux/if_xdp.h
index 44f2bb93e7e6..23a062781468 100644
--- a/include/uapi/linux/if_xdp.h
+++ b/include/uapi/linux/if_xdp.h
@@ -79,6 +79,7 @@ struct xdp_mmap_offsets {
#define XDP_UMEM_COMPLETION_RING 6
#define XDP_STATISTICS 7
#define XDP_OPTIONS 8
+#define XDP_MAX_TX_SKB_BUDGET 9
struct xdp_umem_reg {
__u64 addr; /* Start of packet data area */
diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
index ff8d21f9e95b..5a47339ef7d7 100644
--- a/include/uapi/linux/in6.h
+++ b/include/uapi/linux/in6.h
@@ -152,7 +152,6 @@ struct in6_flowlabel_req {
/*
* IPV6 socket options
*/
-#if __UAPI_DEF_IPV6_OPTIONS
#define IPV6_ADDRFORM 1
#define IPV6_2292PKTINFO 2
#define IPV6_2292HOPOPTS 3
@@ -169,8 +168,10 @@ struct in6_flowlabel_req {
#define IPV6_MULTICAST_IF 17
#define IPV6_MULTICAST_HOPS 18
#define IPV6_MULTICAST_LOOP 19
+#if __UAPI_DEF_IPV6_OPTIONS
#define IPV6_ADD_MEMBERSHIP 20
#define IPV6_DROP_MEMBERSHIP 21
+#endif
#define IPV6_ROUTER_ALERT 22
#define IPV6_MTU_DISCOVER 23
#define IPV6_MTU 24
@@ -203,7 +204,6 @@ struct in6_flowlabel_req {
#define IPV6_IPSEC_POLICY 34
#define IPV6_XFRM_POLICY 35
#define IPV6_HDRINCL 36
-#endif
/*
* Multicast:
diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h
index f29b6c44655e..c218c89e0e2e 100644
--- a/include/uapi/linux/iommufd.h
+++ b/include/uapi/linux/iommufd.h
@@ -56,6 +56,7 @@ enum {
IOMMUFD_CMD_VDEVICE_ALLOC = 0x91,
IOMMUFD_CMD_IOAS_CHANGE_PROCESS = 0x92,
IOMMUFD_CMD_VEVENTQ_ALLOC = 0x93,
+ IOMMUFD_CMD_HW_QUEUE_ALLOC = 0x94,
};
/**
@@ -591,16 +592,43 @@ struct iommu_hw_info_arm_smmuv3 {
};
/**
+ * struct iommu_hw_info_tegra241_cmdqv - NVIDIA Tegra241 CMDQV Hardware
+ * Information (IOMMU_HW_INFO_TYPE_TEGRA241_CMDQV)
+ *
+ * @flags: Must be 0
+ * @version: Version number for the CMDQ-V HW for PARAM bits[03:00]
+ * @log2vcmdqs: Log2 of the total number of VCMDQs for PARAM bits[07:04]
+ * @log2vsids: Log2 of the total number of SID replacements for PARAM bits[15:12]
+ * @__reserved: Must be 0
+ *
+ * VMM can use these fields directly in its emulated global PARAM register. Note
+ * that only one Virtual Interface (VINTF) should be exposed to a VM, i.e. PARAM
+ * bits[11:08] should be set to 0 for log2 of the total number of VINTFs.
+ */
+struct iommu_hw_info_tegra241_cmdqv {
+ __u32 flags;
+ __u8 version;
+ __u8 log2vcmdqs;
+ __u8 log2vsids;
+ __u8 __reserved;
+};
+
+/**
* enum iommu_hw_info_type - IOMMU Hardware Info Types
- * @IOMMU_HW_INFO_TYPE_NONE: Used by the drivers that do not report hardware
+ * @IOMMU_HW_INFO_TYPE_NONE: Output by the drivers that do not report hardware
* info
+ * @IOMMU_HW_INFO_TYPE_DEFAULT: Input to request for a default type
* @IOMMU_HW_INFO_TYPE_INTEL_VTD: Intel VT-d iommu info type
* @IOMMU_HW_INFO_TYPE_ARM_SMMUV3: ARM SMMUv3 iommu info type
+ * @IOMMU_HW_INFO_TYPE_TEGRA241_CMDQV: NVIDIA Tegra241 CMDQV (extension for ARM
+ * SMMUv3) info type
*/
enum iommu_hw_info_type {
IOMMU_HW_INFO_TYPE_NONE = 0,
+ IOMMU_HW_INFO_TYPE_DEFAULT = 0,
IOMMU_HW_INFO_TYPE_INTEL_VTD = 1,
IOMMU_HW_INFO_TYPE_ARM_SMMUV3 = 2,
+ IOMMU_HW_INFO_TYPE_TEGRA241_CMDQV = 3,
};
/**
@@ -626,6 +654,15 @@ enum iommufd_hw_capabilities {
};
/**
+ * enum iommufd_hw_info_flags - Flags for iommu_hw_info
+ * @IOMMU_HW_INFO_FLAG_INPUT_TYPE: If set, @in_data_type carries an input type
+ * for user space to request for a specific info
+ */
+enum iommufd_hw_info_flags {
+ IOMMU_HW_INFO_FLAG_INPUT_TYPE = 1 << 0,
+};
+
+/**
* struct iommu_hw_info - ioctl(IOMMU_GET_HW_INFO)
* @size: sizeof(struct iommu_hw_info)
* @flags: Must be 0
@@ -634,6 +671,12 @@ enum iommufd_hw_capabilities {
* data that kernel supports
* @data_uptr: User pointer to a user-space buffer used by the kernel to fill
* the iommu type specific hardware information data
+ * @in_data_type: This shares the same field with @out_data_type, making it a
+ *                bidirectional field. When IOMMU_HW_INFO_FLAG_INPUT_TYPE is
+ *                set, the input type carried via this @in_data_type field is
+ *                valid, requesting the info data for the given type. If
+ *                IOMMU_HW_INFO_FLAG_INPUT_TYPE is unset, any input value will
+ *                be treated as IOMMU_HW_INFO_TYPE_DEFAULT.
* @out_data_type: Output the iommu hardware info type as defined in the enum
* iommu_hw_info_type.
* @out_capabilities: Output the generic iommu capability info type as defined
@@ -663,7 +706,10 @@ struct iommu_hw_info {
__u32 dev_id;
__u32 data_len;
__aligned_u64 data_uptr;
- __u32 out_data_type;
+ union {
+ __u32 in_data_type;
+ __u32 out_data_type;
+ };
__u8 out_max_pasid_log2;
__u8 __reserved[3];
__aligned_u64 out_capabilities;
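
A hedged sketch of the new input-type path (editorial, not part of the patch): requesting the Tegra241 CMDQV hw_info instead of the default report. IOMMU_GET_HW_INFO is the ioctl named in the kerneldoc above; all other identifiers are from this header.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int get_cmdqv_hw_info(int iommufd, __u32 dev_id,
			     struct iommu_hw_info_tegra241_cmdqv *out)
{
	struct iommu_hw_info cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.size = sizeof(cmd);
	cmd.flags = IOMMU_HW_INFO_FLAG_INPUT_TYPE;	/* make in_data_type valid */
	cmd.dev_id = dev_id;
	cmd.in_data_type = IOMMU_HW_INFO_TYPE_TEGRA241_CMDQV;
	cmd.data_len = sizeof(*out);
	cmd.data_uptr = (__u64)(unsigned long)out;

	return ioctl(iommufd, IOMMU_GET_HW_INFO, &cmd);
}
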
@@ -951,10 +997,29 @@ struct iommu_fault_alloc {
* enum iommu_viommu_type - Virtual IOMMU Type
* @IOMMU_VIOMMU_TYPE_DEFAULT: Reserved for future use
* @IOMMU_VIOMMU_TYPE_ARM_SMMUV3: ARM SMMUv3 driver specific type
+ * @IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV: NVIDIA Tegra241 CMDQV (extension for ARM
+ * SMMUv3) enabled ARM SMMUv3 type
*/
enum iommu_viommu_type {
IOMMU_VIOMMU_TYPE_DEFAULT = 0,
IOMMU_VIOMMU_TYPE_ARM_SMMUV3 = 1,
+ IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV = 2,
+};
+
+/**
+ * struct iommu_viommu_tegra241_cmdqv - NVIDIA Tegra241 CMDQV Virtual Interface
+ * (IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV)
+ * @out_vintf_mmap_offset: mmap offset argument for VINTF's page0
+ * @out_vintf_mmap_length: mmap length argument for VINTF's page0
+ *
+ * Both @out_vintf_mmap_offset and @out_vintf_mmap_length are reported by the
+ * kernel for user space to mmap the VINTF page0 from the host physical address
+ * space to the guest physical address space, so that a guest kernel can
+ * directly read and write the VINTF page0 in order to control its virtual
+ * command queues.
+ */
+struct iommu_viommu_tegra241_cmdqv {
+ __aligned_u64 out_vintf_mmap_offset;
+ __aligned_u64 out_vintf_mmap_length;
};
/**
@@ -965,6 +1030,9 @@ enum iommu_viommu_type {
* @dev_id: The device's physical IOMMU will be used to back the virtual IOMMU
* @hwpt_id: ID of a nesting parent HWPT to associate to
* @out_viommu_id: Output virtual IOMMU ID for the allocated object
+ * @data_len: Length of the type specific data
+ * @__reserved: Must be 0
+ * @data_uptr: User pointer to a driver-specific virtual IOMMU data
*
* Allocate a virtual IOMMU object, representing the underlying physical IOMMU's
* virtualization support that is a security-isolated slice of the real IOMMU HW
@@ -985,6 +1053,9 @@ struct iommu_viommu_alloc {
__u32 dev_id;
__u32 hwpt_id;
__u32 out_viommu_id;
+ __u32 data_len;
+ __u32 __reserved;
+ __aligned_u64 data_uptr;
};
#define IOMMU_VIOMMU_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VIOMMU_ALLOC)
@@ -995,10 +1066,15 @@ struct iommu_viommu_alloc {
* @dev_id: The physical device to allocate a virtual instance on the vIOMMU
* @out_vdevice_id: Object handle for the vDevice. Pass to IOMMU_DESTORY
* @virt_id: Virtual device ID per vIOMMU, e.g. vSID of ARM SMMUv3, vDeviceID
- * of AMD IOMMU, and vRID of a nested Intel VT-d to a Context Table
+ * of AMD IOMMU, and vRID of Intel VT-d
*
* Allocate a virtual device instance (for a physical device) against a vIOMMU.
* This instance holds the device's information (related to its vIOMMU) in a VM.
+ * Userspace should use IOMMU_DESTROY to destroy the virtual device before
+ * destroying the physical device (by closing the vfio_cdev fd). Otherwise the
+ * virtual device is forcibly destroyed on physical device destruction, and
+ * its vdevice_id is permanently leaked (unremovable & unreusable) until the
+ * iommufd is closed.
*/
struct iommu_vdevice_alloc {
__u32 size;
@@ -1075,10 +1151,12 @@ struct iommufd_vevent_header {
* enum iommu_veventq_type - Virtual Event Queue Type
* @IOMMU_VEVENTQ_TYPE_DEFAULT: Reserved for future use
* @IOMMU_VEVENTQ_TYPE_ARM_SMMUV3: ARM SMMUv3 Virtual Event Queue
+ * @IOMMU_VEVENTQ_TYPE_TEGRA241_CMDQV: NVIDIA Tegra241 CMDQV Extension IRQ
*/
enum iommu_veventq_type {
IOMMU_VEVENTQ_TYPE_DEFAULT = 0,
IOMMU_VEVENTQ_TYPE_ARM_SMMUV3 = 1,
+ IOMMU_VEVENTQ_TYPE_TEGRA241_CMDQV = 2,
};
/**
@@ -1103,6 +1181,19 @@ struct iommu_vevent_arm_smmuv3 {
};
/**
+ * struct iommu_vevent_tegra241_cmdqv - Tegra241 CMDQV IRQ
+ * (IOMMU_VEVENTQ_TYPE_TEGRA241_CMDQV)
+ * @lvcmdq_err_map: 128-bit logical vcmdq error map, little-endian.
+ *                  (Refer to register LVCMDQ_ERR_MAPs per VINTF)
+ *
+ * The 128-bit register value from HW exclusively reflects the error bits for a
+ * Virtual Interface represented by a vIOMMU object. Read and report directly.
+ */
+struct iommu_vevent_tegra241_cmdqv {
+ __aligned_le64 lvcmdq_err_map[2];
+};
+
+/**
* struct iommu_veventq_alloc - ioctl(IOMMU_VEVENTQ_ALLOC)
* @size: sizeof(struct iommu_veventq_alloc)
* @flags: Must be 0
@@ -1141,4 +1232,61 @@ struct iommu_veventq_alloc {
__u32 __reserved;
};
#define IOMMU_VEVENTQ_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VEVENTQ_ALLOC)
+
+/**
+ * enum iommu_hw_queue_type - HW Queue Type
+ * @IOMMU_HW_QUEUE_TYPE_DEFAULT: Reserved for future use
+ * @IOMMU_HW_QUEUE_TYPE_TEGRA241_CMDQV: NVIDIA Tegra241 CMDQV (extension for ARM
+ * SMMUv3) Virtual Command Queue (VCMDQ)
+ */
+enum iommu_hw_queue_type {
+ IOMMU_HW_QUEUE_TYPE_DEFAULT = 0,
+ /*
+ * TEGRA241_CMDQV requirements (otherwise, allocation will fail)
+ * - alloc starts from the lowest @index=0 in ascending order
+ * - destroy starts from the last allocated @index in descending order
+ * - @base_addr must be aligned to @length in bytes and mapped in IOAS
+ * - @length must be a power of 2, with a minimum 32 bytes and a maximum
+ * 2 ^ idr[1].CMDQS * 16 bytes (use GET_HW_INFO call to read idr[1]
+ * from struct iommu_hw_info_arm_smmuv3)
+	 * - it is suggested to back the queue memory with contiguous physical
+	 *   pages or a single huge page aligned to the queue size, and to limit
+	 *   the emulated vSMMU's IDR1.CMDQS to log2(huge page size / 16 bytes)
+ */
+ IOMMU_HW_QUEUE_TYPE_TEGRA241_CMDQV = 1,
+};
+
+/**
+ * struct iommu_hw_queue_alloc - ioctl(IOMMU_HW_QUEUE_ALLOC)
+ * @size: sizeof(struct iommu_hw_queue_alloc)
+ * @flags: Must be 0
+ * @viommu_id: Virtual IOMMU ID to associate the HW queue with
+ * @type: One of enum iommu_hw_queue_type
+ * @index: The logical index to the HW queue per virtual IOMMU for a multi-queue
+ * model
+ * @out_hw_queue_id: The ID of the new HW queue
+ * @nesting_parent_iova: Base address of the queue memory in the guest physical
+ * address space
+ * @length: Length of the queue memory
+ *
+ * Allocate a HW queue object for a vIOMMU-specific HW-accelerated queue, which
+ * allows HW to access a guest queue memory described using @nesting_parent_iova
+ * and @length.
+ *
+ * A vIOMMU can allocate multiple queues, but it must use a different @index per
+ * type to separate each allocation, e.g::
+ *
+ * Type1 HW queue0, Type1 HW queue1, Type2 HW queue0, ...
+ */
+struct iommu_hw_queue_alloc {
+ __u32 size;
+ __u32 flags;
+ __u32 viommu_id;
+ __u32 type;
+ __u32 index;
+ __u32 out_hw_queue_id;
+ __aligned_u64 nesting_parent_iova;
+ __aligned_u64 length;
+};
+#define IOMMU_HW_QUEUE_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HW_QUEUE_ALLOC)
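
A non-normative sketch of the allocation described above: VCMDQ index 0 for a vIOMMU, backed by guest queue memory already mapped in the IOAS. All identifiers come from this header.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int alloc_tegra241_vcmdq(int iommufd, __u32 viommu_id,
				__u64 queue_iova, __u64 queue_len,
				__u32 *hw_queue_id)
{
	struct iommu_hw_queue_alloc cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.size = sizeof(cmd);
	cmd.viommu_id = viommu_id;
	cmd.type = IOMMU_HW_QUEUE_TYPE_TEGRA241_CMDQV;
	cmd.index = 0;				/* must start at 0, ascending */
	cmd.nesting_parent_iova = queue_iova;	/* aligned to queue_len, in IOAS */
	cmd.length = queue_len;			/* power of 2, at least 32 bytes */

	if (ioctl(iommufd, IOMMU_HW_QUEUE_ALLOC, &cmd))
		return -1;

	*hw_queue_id = cmd.out_hw_queue_id;
	return 0;
}
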
#endif
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index cf592d7b630f..d4d3ae774b26 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -199,6 +199,7 @@ enum {
DEVCONF_NDISC_EVICT_NOCARRIER,
DEVCONF_ACCEPT_UNTRACKED_NA,
DEVCONF_ACCEPT_RA_MIN_LFT,
+ DEVCONF_FORCE_FORWARDING,
DEVCONF_MAX
};
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 7a4c35ff03fe..f0f0d49d2544 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -644,6 +644,7 @@ struct kvm_ioeventfd {
#define KVM_X86_DISABLE_EXITS_HLT (1 << 1)
#define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2)
#define KVM_X86_DISABLE_EXITS_CSTATE (1 << 3)
+#define KVM_X86_DISABLE_EXITS_APERFMPERF (1 << 4)
/* for KVM_ENABLE_CAP */
struct kvm_enable_cap {
@@ -960,6 +961,7 @@ struct kvm_enable_cap {
#define KVM_CAP_ARM_EL2 240
#define KVM_CAP_ARM_EL2_E2H0 241
#define KVM_CAP_RISCV_MP_STATE_RESET 242
+#define KVM_CAP_ARM_CACHEABLE_PFNMAP_SUPPORTED 243
struct kvm_irq_routing_irqchip {
__u32 irqchip;
diff --git a/include/uapi/linux/mctp.h b/include/uapi/linux/mctp.h
index e1db65df9359..19ad12a0cd4b 100644
--- a/include/uapi/linux/mctp.h
+++ b/include/uapi/linux/mctp.h
@@ -37,6 +37,14 @@ struct sockaddr_mctp_ext {
__u8 smctp_haddr[MAX_ADDR_LEN];
};
+/* A "fully qualified" MCTP address, which includes the system-local network ID,
+ * required to uniquely resolve a routable EID.
+ */
+struct mctp_fq_addr {
+ unsigned int net;
+ mctp_eid_t eid;
+};
+
#define MCTP_NET_ANY 0x0
#define MCTP_ADDR_NULL 0x00
diff --git a/include/uapi/linux/media/raspberrypi/pisp_be_config.h b/include/uapi/linux/media/raspberrypi/pisp_be_config.h
index cbeb714f4d61..2ad3b90684d7 100644
--- a/include/uapi/linux/media/raspberrypi/pisp_be_config.h
+++ b/include/uapi/linux/media/raspberrypi/pisp_be_config.h
@@ -21,10 +21,11 @@
/* preferred byte alignment for outputs */
#define PISP_BACK_END_OUTPUT_MAX_ALIGN 64u
-/* minimum allowed tile width anywhere in the pipeline */
-#define PISP_BACK_END_MIN_TILE_WIDTH 16u
-/* minimum allowed tile width anywhere in the pipeline */
-#define PISP_BACK_END_MIN_TILE_HEIGHT 16u
+/* minimum allowed tile sizes anywhere in the pipeline */
+#define PISP_BACK_END_MIN_TILE_WIDTH 16u
+#define PISP_BACK_END_MIN_TILE_HEIGHT 16u
+#define PISP_BACK_END_MAX_TILE_WIDTH 65536u
+#define PISP_BACK_END_MAX_TILE_HEIGHT 65536u
#define PISP_BACK_END_NUM_OUTPUTS 2
#define PISP_BACK_END_HOG_OUTPUT 1
diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h
index b851c36ad25d..c34a81245f87 100644
--- a/include/uapi/linux/neighbour.h
+++ b/include/uapi/linux/neighbour.h
@@ -54,6 +54,7 @@ enum {
/* Extended flags under NDA_FLAGS_EXT: */
#define NTF_EXT_MANAGED (1 << 0)
#define NTF_EXT_LOCKED (1 << 1)
+#define NTF_EXT_EXT_VALIDATED (1 << 2)
/*
* Neighbor Cache Entry States.
@@ -92,6 +93,10 @@ enum {
* bridge in response to a host trying to communicate via a locked bridge port
* with MAB enabled. Their purpose is to notify user space that a host requires
* authentication.
+ *
+ * NTF_EXT_EXT_VALIDATED flagged neighbor entries were externally validated by
+ * a user space control plane. The kernel will not remove or invalidate them,
+ * but it can probe them and notify user space when they become reachable.
*/
struct nda_cacheinfo {
diff --git a/include/uapi/linux/net_dropmon.h b/include/uapi/linux/net_dropmon.h
index 9dd41c2f58a6..87cbef48d4c7 100644
--- a/include/uapi/linux/net_dropmon.h
+++ b/include/uapi/linux/net_dropmon.h
@@ -10,13 +10,6 @@ struct net_dm_drop_point {
__u32 count;
};
-#define is_drop_point_hw(x) do {\
- int ____i, ____j;\
- for (____i = 0; ____i < 8; i ____i++)\
- ____j |= x[____i];\
- ____j;\
-} while (0)
-
#define NET_DM_CFG_VERSION 0
#define NET_DM_CFG_ALERT_COUNT 1
#define NET_DM_CFG_ALERT_DELAY 2
diff --git a/include/uapi/linux/netconf.h b/include/uapi/linux/netconf.h
index fac4edd55379..1c8c84d65ae3 100644
--- a/include/uapi/linux/netconf.h
+++ b/include/uapi/linux/netconf.h
@@ -19,6 +19,7 @@ enum {
NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
NETCONFA_INPUT,
NETCONFA_BC_FORWARDING,
+ NETCONFA_FORCE_FORWARDING,
__NETCONFA_MAX
};
#define NETCONFA_MAX (__NETCONFA_MAX - 1)
diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h
index 7eb9571786b8..48eb49aa03d4 100644
--- a/include/uapi/linux/netdev.h
+++ b/include/uapi/linux/netdev.h
@@ -77,6 +77,11 @@ enum netdev_qstats_scope {
NETDEV_QSTATS_SCOPE_QUEUE = 1,
};
+enum netdev_napi_threaded {
+ NETDEV_NAPI_THREADED_DISABLED,
+ NETDEV_NAPI_THREADED_ENABLED,
+};
+
enum {
NETDEV_A_DEV_IFINDEX = 1,
NETDEV_A_DEV_PAD,
@@ -134,6 +139,7 @@ enum {
NETDEV_A_NAPI_DEFER_HARD_IRQS,
NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT,
NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT,
+ NETDEV_A_NAPI_THREADED,
__NETDEV_A_NAPI_MAX,
NETDEV_A_NAPI_MAX = (__NETDEV_A_NAPI_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nfnetlink_hook.h b/include/uapi/linux/netfilter/nfnetlink_hook.h
index 84a561a74b98..1a2c4d6424b5 100644
--- a/include/uapi/linux/netfilter/nfnetlink_hook.h
+++ b/include/uapi/linux/netfilter/nfnetlink_hook.h
@@ -61,10 +61,12 @@ enum nfnl_hook_chain_desc_attributes {
*
* @NFNL_HOOK_TYPE_NFTABLES: nf_tables base chain
* @NFNL_HOOK_TYPE_BPF: bpf program
+ * @NFNL_HOOK_TYPE_NFT_FLOWTABLE: nf_tables flowtable
*/
enum nfnl_hook_chaintype {
NFNL_HOOK_TYPE_NFTABLES = 0x1,
NFNL_HOOK_TYPE_BPF,
+ NFNL_HOOK_TYPE_NFT_FLOWTABLE,
};
/**
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index e9ccf43fe3c6..d1a14f2892d9 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -1330,7 +1330,15 @@
* TID to Link mapping for downlink/uplink traffic.
*
* @NL80211_CMD_ASSOC_MLO_RECONF: For a non-AP MLD station, request to
- * add/remove links to/from the association.
+ * add/remove links to/from the association. To indicate link
+ * reconfiguration request results from the driver, this command is also
+ * used as an event to notify userspace about the added links information.
+ * For notifying the removed links information, the existing
+ * %NL80211_CMD_LINKS_REMOVED command is used. This command is also used to
+ * notify userspace about newly added links for the current connection in
+ * case of AP-initiated link recommendation requests, received via
+ * a BTM (BSS Transition Management) request or a link reconfig notify
+ * frame, where the driver handles the link recommendation offload.
*
* @NL80211_CMD_EPCS_CFG: EPCS configuration for a station. Used by userland to
* control EPCS configuration. Used to notify userland on the current state
@@ -2899,6 +2907,27 @@ enum nl80211_commands {
* APs Support". Drivers may set additional flags that they support
* in the kernel or device.
*
+ * @NL80211_ATTR_WIPHY_RADIO_INDEX: (int) Integer attribute denoting the index
+ *	of the radio of interest. Internally a value of -1 is used to
+ *	indicate that the radio id is not given in user-space. This means
+ *	that all the attributes are applicable to all the radios. If a radio
+ *	index is provided in user-space, the attributes will be applicable to
+ *	that specific radio only. If the radio id is greater than the number
+ *	of radios, an error denoting an invalid value is returned.
+ *
+ * @NL80211_ATTR_S1G_LONG_BEACON_PERIOD: (u8) Integer attribute that represents
+ * the number of beacon intervals between each long beacon transmission
+ * for an S1G BSS with short beaconing enabled. This is a required
+ * attribute for initialising an S1G short beaconing BSS. When updating
+ * the short beacon data, this is not required. It has a minimum value of
+ *	2 (i.e. 2 beacon intervals).
+ *
+ * @NL80211_ATTR_S1G_SHORT_BEACON: Nested attribute containing the short beacon
+ * head and tail used to set or update the short beacon templates. When
+ * bringing up a new interface, %NL80211_ATTR_S1G_LONG_BEACON_PERIOD is
+ * required alongside this attribute. Refer to
+ *	&enum nl80211_s1g_short_beacon_attrs for the attribute definitions.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -3456,6 +3485,11 @@ enum nl80211_attrs {
NL80211_ATTR_ASSOC_MLD_EXT_CAPA_OPS,
+ NL80211_ATTR_WIPHY_RADIO_INDEX,
+
+ NL80211_ATTR_S1G_LONG_BEACON_PERIOD,
+ NL80211_ATTR_S1G_SHORT_BEACON,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -8088,6 +8122,7 @@ enum nl80211_ap_settings_flags {
* and contains attributes defined in &enum nl80211_if_combination_attrs.
* @NL80211_WIPHY_RADIO_ATTR_ANTENNA_MASK: bitmask (u32) of antennas
* connected to this radio.
+ * @NL80211_WIPHY_RADIO_ATTR_RTS_THRESHOLD: RTS threshold (u32) of this radio.
*
* @__NL80211_WIPHY_RADIO_ATTR_LAST: Internal
* @NL80211_WIPHY_RADIO_ATTR_MAX: Highest attribute
@@ -8099,6 +8134,7 @@ enum nl80211_wiphy_radio_attrs {
NL80211_WIPHY_RADIO_ATTR_FREQ_RANGE,
NL80211_WIPHY_RADIO_ATTR_INTERFACE_COMBINATION,
NL80211_WIPHY_RADIO_ATTR_ANTENNA_MASK,
+ NL80211_WIPHY_RADIO_ATTR_RTS_THRESHOLD,
/* keep last */
__NL80211_WIPHY_RADIO_ATTR_LAST,
@@ -8128,4 +8164,27 @@ enum nl80211_wiphy_radio_freq_range {
NL80211_WIPHY_RADIO_FREQ_ATTR_MAX = __NL80211_WIPHY_RADIO_FREQ_ATTR_LAST - 1,
};
+/**
+ * enum nl80211_s1g_short_beacon_attrs - S1G short beacon data
+ *
+ * @__NL80211_S1G_SHORT_BEACON_ATTR_INVALID: Invalid
+ *
+ * @NL80211_S1G_SHORT_BEACON_ATTR_HEAD: Short beacon head (binary).
+ * @NL80211_S1G_SHORT_BEACON_ATTR_TAIL: Short beacon tail (binary).
+ *
+ * @__NL80211_S1G_SHORT_BEACON_ATTR_LAST: Internal
+ * @NL80211_S1G_SHORT_BEACON_ATTR_MAX: Highest attribute
+ */
+enum nl80211_s1g_short_beacon_attrs {
+ __NL80211_S1G_SHORT_BEACON_ATTR_INVALID,
+
+ NL80211_S1G_SHORT_BEACON_ATTR_HEAD,
+ NL80211_S1G_SHORT_BEACON_ATTR_TAIL,
+
+ /* keep last */
+ __NL80211_S1G_SHORT_BEACON_ATTR_LAST,
+ NL80211_S1G_SHORT_BEACON_ATTR_MAX =
+ __NL80211_S1G_SHORT_BEACON_ATTR_LAST - 1
+};
+
#endif /* __LINUX_NL80211_H */
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 3a701bd1f31b..3092c2c6f1d2 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -186,6 +186,11 @@ enum ovs_packet_cmd {
* %OVS_PACKET_ATTR_USERSPACE action specify the Maximum received fragment
* size.
* @OVS_PACKET_ATTR_HASH: Packet hash info (e.g. hash, sw_hash and l4_hash in skb).
+ * @OVS_PACKET_ATTR_UPCALL_PID: Netlink PID to use for upcalls while
+ * processing %OVS_PACKET_CMD_EXECUTE. Takes precedence over all other ways
+ * to determine the Netlink PID including %OVS_USERSPACE_ATTR_PID,
+ * %OVS_DP_ATTR_UPCALL_PID, %OVS_DP_ATTR_PER_CPU_PIDS and the
+ * %OVS_VPORT_ATTR_UPCALL_PID.
*
* These attributes follow the &struct ovs_header within the Generic Netlink
* payload for %OVS_PACKET_* commands.
@@ -205,6 +210,7 @@ enum ovs_packet_attr {
OVS_PACKET_ATTR_MRU, /* Maximum received IP fragment size. */
OVS_PACKET_ATTR_LEN, /* Packet size before truncation. */
OVS_PACKET_ATTR_HASH, /* Packet hash. */
+ OVS_PACKET_ATTR_UPCALL_PID, /* u32 Netlink PID. */
__OVS_PACKET_ATTR_MAX
};
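
Editor's note: for illustration, a hedged libnl-3 fragment attaching the new per-request upcall PID; the enclosing OVS_PACKET_CMD_EXECUTE message construction is assumed and omitted.

    #include <netlink/attr.h>
    #include <linux/openvswitch.h>

    /* Sketch: override the upcall Netlink PID for a single
     * OVS_PACKET_CMD_EXECUTE request. Per the comment above, this value
     * takes precedence over the datapath, per-CPU and vport PIDs. */
    static int ovs_set_upcall_pid(struct nl_msg *msg, __u32 upcall_pid)
    {
        return nla_put_u32(msg, OVS_PACKET_ATTR_UPCALL_PID, upcall_pid);
    }
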
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 3e41349f3fa2..c2da76e78bad 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -1211,4 +1211,72 @@ enum {
#define TCA_ETS_MAX (__TCA_ETS_MAX - 1)
+/* DUALPI2 */
+enum tc_dualpi2_drop_overload {
+ TC_DUALPI2_DROP_OVERLOAD_OVERFLOW = 0,
+ TC_DUALPI2_DROP_OVERLOAD_DROP = 1,
+ __TCA_DUALPI2_DROP_OVERLOAD_MAX,
+};
+#define TCA_DUALPI2_DROP_OVERLOAD_MAX (__TCA_DUALPI2_DROP_OVERLOAD_MAX - 1)
+
+enum tc_dualpi2_drop_early {
+ TC_DUALPI2_DROP_EARLY_DROP_DEQUEUE = 0,
+ TC_DUALPI2_DROP_EARLY_DROP_ENQUEUE = 1,
+ __TCA_DUALPI2_DROP_EARLY_MAX,
+};
+#define TCA_DUALPI2_DROP_EARLY_MAX (__TCA_DUALPI2_DROP_EARLY_MAX - 1)
+
+enum tc_dualpi2_ecn_mask {
+ TC_DUALPI2_ECN_MASK_L4S_ECT = 1,
+ TC_DUALPI2_ECN_MASK_CLA_ECT = 2,
+ TC_DUALPI2_ECN_MASK_ANY_ECT = 3,
+ __TCA_DUALPI2_ECN_MASK_MAX,
+};
+#define TCA_DUALPI2_ECN_MASK_MAX (__TCA_DUALPI2_ECN_MASK_MAX - 1)
+
+enum tc_dualpi2_split_gso {
+ TC_DUALPI2_SPLIT_GSO_NO_SPLIT_GSO = 0,
+ TC_DUALPI2_SPLIT_GSO_SPLIT_GSO = 1,
+ __TCA_DUALPI2_SPLIT_GSO_MAX,
+};
+#define TCA_DUALPI2_SPLIT_GSO_MAX (__TCA_DUALPI2_SPLIT_GSO_MAX - 1)
+
+enum {
+ TCA_DUALPI2_UNSPEC,
+ TCA_DUALPI2_LIMIT, /* Packets */
+ TCA_DUALPI2_MEMORY_LIMIT, /* Bytes */
+ TCA_DUALPI2_TARGET, /* us */
+ TCA_DUALPI2_TUPDATE, /* us */
+ TCA_DUALPI2_ALPHA, /* Hz scaled up by 256 */
+ TCA_DUALPI2_BETA, /* Hz scaled up by 256 */
+ TCA_DUALPI2_STEP_THRESH_PKTS, /* Step threshold in packets */
+ TCA_DUALPI2_STEP_THRESH_US, /* Step threshold in microseconds */
+ TCA_DUALPI2_MIN_QLEN_STEP, /* Minimum qlen to apply STEP_THRESH */
+ TCA_DUALPI2_COUPLING, /* Coupling factor between queues */
+ TCA_DUALPI2_DROP_OVERLOAD, /* Whether to drop on overload */
+ TCA_DUALPI2_DROP_EARLY, /* Whether to drop on enqueue */
+ TCA_DUALPI2_C_PROTECTION, /* Percentage */
+ TCA_DUALPI2_ECN_MASK, /* L4S queue classification mask */
+ TCA_DUALPI2_SPLIT_GSO, /* Split GSO packets at enqueue */
+ TCA_DUALPI2_PAD,
+ __TCA_DUALPI2_MAX
+};
+
+#define TCA_DUALPI2_MAX (__TCA_DUALPI2_MAX - 1)
+
+struct tc_dualpi2_xstats {
+ __u32 prob; /* current probability */
+ __u32 delay_c; /* current delay in C queue */
+ __u32 delay_l; /* current delay in L queue */
+ __u32 packets_in_c; /* number of packets enqueued in C queue */
+ __u32 packets_in_l; /* number of packets enqueued in L queue */
+ __u32 maxq; /* maximum queue size */
+ __u32 ecn_mark; /* packets marked with ECN */
+ __u32 step_marks; /* ECN marks due to the step AQM */
+ __s32 credit; /* current c_protection credit */
+ __u32 memory_used; /* Memory used by both queues */
+ __u32 max_memory_used; /* Maximum used memory */
+ __u32 memory_limit; /* Memory limit of both queues */
+};
+
#endif
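
Editor's note: a rough sketch, under stated assumptions, of filling a DUALPI2 TCA_OPTIONS nest with libnl-3. The attribute widths (u32 vs. u8) and the example values are guesses; only the units follow the comments in the enum above.

    #include <netlink/attr.h>
    #include <linux/rtnetlink.h>
    #include <linux/pkt_sched.h>

    /* Sketch: populate a DUALPI2 options nest with illustrative values.
     * Units follow the comments in the TCA_DUALPI2_* enum above. */
    static int dualpi2_fill_opts(struct nl_msg *msg)
    {
        struct nlattr *opts = nla_nest_start(msg, TCA_OPTIONS);

        if (!opts)
            return -1;
        nla_put_u32(msg, TCA_DUALPI2_LIMIT, 10000);    /* packets */
        nla_put_u32(msg, TCA_DUALPI2_TARGET, 15000);   /* us */
        nla_put_u32(msg, TCA_DUALPI2_TUPDATE, 16000);  /* us */
        nla_put_u8(msg, TCA_DUALPI2_ECN_MASK, TC_DUALPI2_ECN_MASK_L4S_ECT);
        nla_put_u8(msg, TCA_DUALPI2_SPLIT_GSO, TC_DUALPI2_SPLIT_GSO_SPLIT_GSO);
        nla_nest_end(msg, opts);
        return 0;
    }
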
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 475fc8ca4403..ed3aed264aeb 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -244,6 +244,8 @@ struct prctl_mm_map {
# define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
/* Unused; kept only for source compatibility */
# define PR_MTE_TCF_SHIFT 1
+/* MTE tag check store only */
+# define PR_MTE_STORE_ONLY (1UL << 19)
/* RISC-V pointer masking tag length */
# define PR_PMLEN_SHIFT 24
# define PR_PMLEN_MASK (0x7fUL << PR_PMLEN_SHIFT)
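
Editor's note: a hedged sketch of how a thread might opt into the new store-only tag checking; that PR_MTE_STORE_ONLY is passed through PR_SET_TAGGED_ADDR_CTRL together with the other PR_MTE_* bits is an assumption, not stated in the hunk.

    #include <sys/prctl.h>
    #include <linux/prctl.h>

    /* Sketch: enable tagged addresses, synchronous MTE tag checks and the
     * store-only check mode (assumption: PR_MTE_STORE_ONLY is OR'd into the
     * same PR_SET_TAGGED_ADDR_CTRL word as the other PR_MTE_* bits). */
    static int enable_mte_store_only(void)
    {
        unsigned long ctrl = PR_TAGGED_ADDR_ENABLE |
                             PR_MTE_TCF_SYNC |
                             (0xfffeUL << PR_MTE_TAG_SHIFT) | /* exclude tag 0 */
                             PR_MTE_STORE_ONLY;

        return prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0);
    }
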
diff --git a/include/uapi/linux/rkisp1-config.h b/include/uapi/linux/rkisp1-config.h
index 2d995f3c1ca3..3b060ea6eed7 100644
--- a/include/uapi/linux/rkisp1-config.h
+++ b/include/uapi/linux/rkisp1-config.h
@@ -170,6 +170,13 @@
#define RKISP1_CIF_ISP_COMPAND_NUM_POINTS 64
/*
+ * Wide Dynamic Range
+ */
+#define RKISP1_CIF_ISP_WDR_CURVE_NUM_INTERV 32
+#define RKISP1_CIF_ISP_WDR_CURVE_NUM_COEFF (RKISP1_CIF_ISP_WDR_CURVE_NUM_INTERV + 1)
+#define RKISP1_CIF_ISP_WDR_CURVE_NUM_DY_REGS 4
+
+/*
* Measurement types
*/
#define RKISP1_CIF_ISP_STAT_AWB (1U << 0)
@@ -889,6 +896,72 @@ struct rkisp1_cif_isp_compand_curve_config {
__u32 y[RKISP1_CIF_ISP_COMPAND_NUM_POINTS];
};
+/**
+ * struct rkisp1_cif_isp_wdr_tone_curve - Tone mapping curve definition for WDR.
+ *
+ * @dY: the dYn increments for the horizontal (input) axis of the tone curve.
+ * Each 3-bit dY value represents an increment of 2**(value+3).
+ * dY[0] bits 0:2 is increment dY1, bit 3 unused,
+ * dY[0] bits 4:6 is increment dY2, bit 7 unused,
+ * ...
+ * dY[0] bits 28:30 is increment dY8, bit 31 unused,
+ * ... and so on until dY[3] bits 28:30 is increment dY32, bit 31 unused.
+ * @ym: the Ym values for the vertical (output) axis of the tone curve.
+ * Each value is 13 bits wide.
+ */
+struct rkisp1_cif_isp_wdr_tone_curve {
+ __u32 dY[RKISP1_CIF_ISP_WDR_CURVE_NUM_DY_REGS];
+ __u16 ym[RKISP1_CIF_ISP_WDR_CURVE_NUM_COEFF];
+};
+
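
Editor's note: to make the dY packing described above concrete, a small sketch that fills the four dY registers from 32 3-bit increment codes (8 codes per register, 4 bits per slot, top bit unused); it assumes the header above is available.

    #include <string.h>
    #include <linux/rkisp1-config.h>

    /* Sketch: pack 32 dY codes (dy[0] = dY1 ... dy[31] = dY32) into the dY
     * registers. Code n selects an input-axis increment of 2^(n + 3). */
    static void rkisp1_wdr_pack_dy(struct rkisp1_cif_isp_wdr_tone_curve *tc,
                                   const __u8 dy[RKISP1_CIF_ISP_WDR_CURVE_NUM_INTERV])
    {
        unsigned int i;

        memset(tc->dY, 0, sizeof(tc->dY));
        for (i = 0; i < RKISP1_CIF_ISP_WDR_CURVE_NUM_INTERV; i++)
            tc->dY[i / 8] |= (__u32)(dy[i] & 0x7) << ((i % 8) * 4);
    }
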
+/**
+ * struct rkisp1_cif_isp_wdr_iref_config - Illumination reference config for WDR.
+ *
+ * Use illumination reference value as described below, instead of only the
+ * luminance (Y) value for tone mapping and gain calculations:
+ * IRef = (rgb_factor * RGBMax_tr + (8 - rgb_factor) * Y)/8
+ *
+ * @rgb_factor: defines how much influence the RGBmax approach has in
+ * comparison to Y (valid values are 0..8).
+ * @use_y9_8: use Y*9/8 for maximum value calculation along with the
+ * default of R, G, B for noise reduction.
+ * @use_rgb7_8: decrease RGBMax by 7/8 for noise reduction.
+ * @disable_transient: disable transient calculation between Y and RGBY_max.
+ */
+struct rkisp1_cif_isp_wdr_iref_config {
+ __u8 rgb_factor;
+ __u8 use_y9_8;
+ __u8 use_rgb7_8;
+ __u8 disable_transient;
+};
+
+/**
+ * struct rkisp1_cif_isp_wdr_config - Configuration for wide dynamic range.
+ *
+ * @tone_curve: tone mapping curve.
+ * @iref_config: illumination reference configuration. (when use_iref is true)
+ * @rgb_offset: RGB offset value for RGB operation mode. (12 bits)
+ * @luma_offset: luminance offset value for RGB operation mode. (12 bits)
+ * @dmin_thresh: lower threshold for deltaMin value. (12 bits)
+ * @dmin_strength: strength factor for deltaMin. (valid range is 0x00..0x10)
+ * @use_rgb_colorspace: use RGB instead of luminance/chrominance colorspace.
+ * @bypass_chroma_mapping: disable chrominance mapping (only valid if
+ * use_rgb_colorspace = 0)
+ * @use_iref: use illumination reference instead of Y for tone mapping
+ * and gain calculations.
+ */
+struct rkisp1_cif_isp_wdr_config {
+ struct rkisp1_cif_isp_wdr_tone_curve tone_curve;
+ struct rkisp1_cif_isp_wdr_iref_config iref_config;
+ __u16 rgb_offset;
+ __u16 luma_offset;
+ __u16 dmin_thresh;
+ __u8 dmin_strength;
+ __u8 use_rgb_colorspace;
+ __u8 bypass_chroma_mapping;
+ __u8 use_iref;
+};
+
/*---------- PART2: Measurement Statistics ------------*/
/**
@@ -1059,6 +1132,7 @@ struct rkisp1_stat_buffer {
* @RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_BLS: BLS in the compand block
* @RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_EXPAND: Companding expand curve
* @RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_COMPRESS: Companding compress curve
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_WDR: Wide dynamic range
*/
enum rkisp1_ext_params_block_type {
RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS,
@@ -1081,11 +1155,15 @@ enum rkisp1_ext_params_block_type {
RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_BLS,
RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_EXPAND,
RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_COMPRESS,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_WDR,
};
#define RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE (1U << 0)
#define RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE (1U << 1)
+/* A bitmask of parameters blocks supported on the current hardware. */
+#define RKISP1_CID_SUPPORTED_PARAMS_BLOCKS (V4L2_CID_USER_RKISP1_BASE + 0x01)
+
/**
* struct rkisp1_ext_params_block_header - RkISP1 extensible parameters block
* header
@@ -1460,6 +1538,23 @@ struct rkisp1_ext_params_compand_curve_config {
struct rkisp1_cif_isp_compand_curve_config config;
} __attribute__((aligned(8)));
+/**
+ * struct rkisp1_ext_params_wdr_config - RkISP1 extensible params
+ * Wide dynamic range config
+ *
+ * RkISP1 extensible parameters WDR block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_WDR`
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: WDR configuration, see
+ * :c:type:`rkisp1_cif_isp_wdr_config`
+ */
+struct rkisp1_ext_params_wdr_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_wdr_config config;
+} __attribute__((aligned(8)));
+
/*
* The rkisp1_ext_params_compand_curve_config structure is counted twice as it
* is used for both the COMPAND_EXPAND and COMPAND_COMPRESS block types.
@@ -1484,7 +1579,8 @@ struct rkisp1_ext_params_compand_curve_config {
sizeof(struct rkisp1_ext_params_afc_config) +\
sizeof(struct rkisp1_ext_params_compand_bls_config) +\
sizeof(struct rkisp1_ext_params_compand_curve_config) +\
- sizeof(struct rkisp1_ext_params_compand_curve_config))
+ sizeof(struct rkisp1_ext_params_compand_curve_config) +\
+ sizeof(struct rkisp1_ext_params_wdr_config))
/**
* enum rksip1_ext_param_buffer_version - RkISP1 extensible parameters version
@@ -1520,6 +1616,14 @@ enum rksip1_ext_param_buffer_version {
* V4L2 control. If such control is not available, userspace should assume only
* RKISP1_EXT_PARAM_BUFFER_V1 is supported by the driver.
*
+ * The read-only V4L2 control ``RKISP1_CID_SUPPORTED_PARAMS_BLOCKS`` can be used
+ * to query the blocks supported by the device. It contains a bitmask where each
+ * bit represents the availability of the corresponding entry from the
+ * :c:type:`rkisp1_ext_params_block_type` enum. The current and default values
+ * of the control represents the blocks supported by the device instance, while
+ * the maximum value represents the blocks supported by the kernel driver,
+ * independently of the device instance.
+ *
* For each ISP block that userspace wants to configure, a block-specific
* structure is appended to the @data buffer, one after the other without gaps
* in between nor overlaps. Userspace shall populate the @data_size field with
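
Editor's note: a minimal userspace sketch of the query described in the paragraph above, checking whether this device instance supports the new WDR block; reading the per-instance value out of VIDIOC_QUERY_EXT_CTRL's default_value follows that paragraph, the rest is assumption.

    #include <sys/ioctl.h>
    #include <linux/videodev2.h>
    #include <linux/rkisp1-config.h>

    /* Sketch: return non-zero if this device instance supports the WDR
     * extensible parameters block. If the control is missing, assume only
     * RKISP1_EXT_PARAM_BUFFER_V1 blocks, as the text above suggests. */
    static int rkisp1_wdr_block_supported(int params_fd)
    {
        struct v4l2_query_ext_ctrl qc = {
            .id = RKISP1_CID_SUPPORTED_PARAMS_BLOCKS,
        };

        if (ioctl(params_fd, VIDIOC_QUERY_EXT_CTRL, &qc) < 0)
            return 0;

        return !!(qc.default_value &
                  (1ULL << RKISP1_EXT_PARAMS_BLOCK_TYPE_WDR));
    }
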
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index 1d234d7e1892..49f5640092a0 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -186,6 +186,7 @@ enum
LINUX_MIB_TIMEWAITKILLED, /* TimeWaitKilled */
LINUX_MIB_PAWSACTIVEREJECTED, /* PAWSActiveRejected */
LINUX_MIB_PAWSESTABREJECTED, /* PAWSEstabRejected */
+ LINUX_MIB_BEYOND_WINDOW, /* BeyondWindow */
LINUX_MIB_TSECRREJECTED, /* TSEcrRejected */
LINUX_MIB_PAWS_OLD_ACK, /* PAWSOldAck */
LINUX_MIB_PAWS_TW_REJECTED, /* PAWSTimewait */
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index 8981f00204db..63d1464cb71c 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -573,6 +573,7 @@ enum {
NET_IPV6_ACCEPT_RA_FROM_LOCAL=26,
NET_IPV6_ACCEPT_RA_RT_INFO_MIN_PLEN=27,
NET_IPV6_RA_DEFRTR_METRIC=28,
+ NET_IPV6_FORCE_FORWARDING=29,
__NET_IPV6_MAX
};
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 72e32814ea83..f836512e9deb 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -222,6 +222,12 @@ enum v4l2_colorfx {
*/
#define V4L2_CID_USER_UVC_BASE (V4L2_CID_USER_BASE + 0x11e0)
+/*
+ * The base for Rockchip ISP1 driver controls.
+ * We reserve 16 controls for this driver.
+ */
+#define V4L2_CID_USER_RKISP1_BASE (V4L2_CID_USER_BASE + 0x1220)
+
/* MPEG-class control IDs */
/* The MPEG controls are applicable to all codec controls
* and the 'MPEG' part of the define is historical */
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index d4b3e2ae1314..d6ad01fbb8d2 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -235,4 +235,11 @@
*/
#define VHOST_VDPA_GET_VRING_SIZE _IOWR(VHOST_VIRTIO, 0x82, \
struct vhost_vring_state)
+
+/* Extended features manipulation */
+#define VHOST_GET_FEATURES_ARRAY _IOR(VHOST_VIRTIO, 0x83, \
+ struct vhost_features_array)
+#define VHOST_SET_FEATURES_ARRAY _IOW(VHOST_VIRTIO, 0x83, \
+ struct vhost_features_array)
+
#endif
diff --git a/include/uapi/linux/vhost_types.h b/include/uapi/linux/vhost_types.h
index d7656908f730..1c39cc5f5a31 100644
--- a/include/uapi/linux/vhost_types.h
+++ b/include/uapi/linux/vhost_types.h
@@ -110,6 +110,11 @@ struct vhost_msg_v2 {
};
};
+struct vhost_features_array {
+ __u64 count; /* number of entries present in features array */
+ __u64 features[] __counted_by(count);
+};
+
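
Editor's note: for illustration, a hedged sketch of calling the new array ioctl; the convention that the caller sets @count to the capacity it allocated before VHOST_GET_FEATURES_ARRAY is an assumption based on the struct comment.

    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/vhost.h>

    /* Sketch: fetch 'count' extended feature words from a vhost device fd.
     * Assumption: the caller advertises the array capacity in 'count' and
     * the kernel fills 'features[]' on VHOST_GET_FEATURES_ARRAY. */
    static struct vhost_features_array *vhost_get_features(int vhost_fd, __u64 count)
    {
        struct vhost_features_array *arr;

        arr = calloc(1, sizeof(*arr) + count * sizeof(arr->features[0]));
        if (!arr)
            return NULL;

        arr->count = count;
        if (ioctl(vhost_fd, VHOST_GET_FEATURES_ARRAY, arr) < 0) {
            free(arr);
            return NULL;
        }
        return arr;
    }
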
struct vhost_memory_region {
__u64 guest_phys_addr;
__u64 memory_size; /* bytes */
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 9e3b366d5fc7..3dd9fa45dde1 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -726,7 +726,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_SGBRG12 v4l2_fourcc('G', 'B', '1', '2') /* 12 GBGB.. RGRG.. */
#define V4L2_PIX_FMT_SGRBG12 v4l2_fourcc('B', 'A', '1', '2') /* 12 GRGR.. BGBG.. */
#define V4L2_PIX_FMT_SRGGB12 v4l2_fourcc('R', 'G', '1', '2') /* 12 RGRG.. GBGB.. */
- /* 12bit raw bayer packed, 6 bytes for every 4 pixels */
+ /* 12bit raw bayer packed, 3 bytes for every 2 pixels */
#define V4L2_PIX_FMT_SBGGR12P v4l2_fourcc('p', 'B', 'C', 'C')
#define V4L2_PIX_FMT_SGBRG12P v4l2_fourcc('p', 'G', 'C', 'C')
#define V4L2_PIX_FMT_SGRBG12P v4l2_fourcc('p', 'g', 'C', 'C')
@@ -840,6 +840,12 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_PISP_COMP2_BGGR v4l2_fourcc('P', 'C', '2', 'B') /* PiSP 8-bit mode 2 compressed BGGR bayer */
#define V4L2_PIX_FMT_PISP_COMP2_MONO v4l2_fourcc('P', 'C', '2', 'M') /* PiSP 8-bit mode 2 compressed monochrome */
+/* Renesas RZ/V2H CRU packed formats. 64-bit units with contiguous pixels */
+#define V4L2_PIX_FMT_RAW_CRU10 v4l2_fourcc('C', 'R', '1', '0')
+#define V4L2_PIX_FMT_RAW_CRU12 v4l2_fourcc('C', 'R', '1', '2')
+#define V4L2_PIX_FMT_RAW_CRU14 v4l2_fourcc('C', 'R', '1', '4')
+#define V4L2_PIX_FMT_RAW_CRU20 v4l2_fourcc('C', 'R', '2', '0')
+
/* SDR formats - used only for Software Defined Radio devices */
#define V4L2_SDR_FMT_CU8 v4l2_fourcc('C', 'U', '0', '8') /* IQ u8 */
#define V4L2_SDR_FMT_CU16LE v4l2_fourcc('C', 'U', '1', '6') /* IQ u16le */
@@ -861,6 +867,7 @@ struct v4l2_pix_format {
#define V4L2_META_FMT_VSP1_HGT v4l2_fourcc('V', 'S', 'P', 'T') /* R-Car VSP1 2-D Histogram */
#define V4L2_META_FMT_UVC v4l2_fourcc('U', 'V', 'C', 'H') /* UVC Payload Header metadata */
#define V4L2_META_FMT_D4XX v4l2_fourcc('D', '4', 'X', 'X') /* D4XX Payload Header metadata */
+#define V4L2_META_FMT_UVC_MSXU_1_5 v4l2_fourcc('U', 'V', 'C', 'M') /* UVC MSXU metadata */
#define V4L2_META_FMT_VIVID v4l2_fourcc('V', 'I', 'V', 'D') /* Vivid Metadata */
/* Vendor specific - used for RK_ISP1 camera sub-system */
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index 963540deae66..8bf27ab8bcb4 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -70,6 +70,28 @@
* with the same MAC.
*/
#define VIRTIO_NET_F_SPEED_DUPLEX 63 /* Device set linkspeed and duplex */
+#define VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO 65 /* Driver can receive
+ * GSO-over-UDP-tunnel packets
+ */
+#define VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM 66 /* Driver handles
+ * GSO-over-UDP-tunnel
+ * packets with partial csum
+ * for the outer header
+ */
+#define VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO 67 /* Device can receive
+ * GSO-over-UDP-tunnel packets
+ */
+#define VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO_CSUM 68 /* Device handles
+ * GSO-over-UDP-tunnel
+ * packets with partial csum
+ * for the outer header
+ */
+
+/* Offload bits corresponding to the VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO{,_CSUM}
+ * features
+ */
+#define VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_MAPPED 46
+#define VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM_MAPPED 47
#ifndef VIRTIO_NET_NO_LEGACY
#define VIRTIO_NET_F_GSO 6 /* Host handles pkts w/ any GSO type */
@@ -131,12 +153,17 @@ struct virtio_net_hdr_v1 {
#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /* Use csum_start, csum_offset */
#define VIRTIO_NET_HDR_F_DATA_VALID 2 /* Csum is valid */
#define VIRTIO_NET_HDR_F_RSC_INFO 4 /* rsc info in csum_ fields */
+#define VIRTIO_NET_HDR_F_UDP_TUNNEL_CSUM 8 /* UDP tunnel csum offload */
__u8 flags;
#define VIRTIO_NET_HDR_GSO_NONE 0 /* Not a GSO frame */
#define VIRTIO_NET_HDR_GSO_TCPV4 1 /* GSO frame, IPv4 TCP (TSO) */
#define VIRTIO_NET_HDR_GSO_UDP 3 /* GSO frame, IPv4 UDP (UFO) */
#define VIRTIO_NET_HDR_GSO_TCPV6 4 /* GSO frame, IPv6 TCP */
#define VIRTIO_NET_HDR_GSO_UDP_L4 5 /* GSO frame, IPv4& IPv6 UDP (USO) */
+#define VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV4 0x20 /* UDPv4 tunnel present */
+#define VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV6 0x40 /* UDPv6 tunnel present */
+#define VIRTIO_NET_HDR_GSO_UDP_TUNNEL (VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV4 | \
+ VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV6)
#define VIRTIO_NET_HDR_GSO_ECN 0x80 /* TCP has ECN set */
__u8 gso_type;
__virtio16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */
@@ -181,6 +208,12 @@ struct virtio_net_hdr_v1_hash {
__le16 padding;
};
+struct virtio_net_hdr_v1_hash_tunnel {
+ struct virtio_net_hdr_v1_hash hash_hdr;
+ __le16 outer_th_offset;
+ __le16 inner_nh_offset;
+};
+
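
Editor's note: a short hedged helper splitting the tunnel indication out of gso_type; treating the UDP tunnel flags as OR'd on top of the base GSO type is an inference from their values (0x20/0x40 lie outside the 0..5 type range), not something the hunk states.

    #include <linux/virtio_net.h>

    /* Sketch: recover the inner GSO type and the tunnel indication from
     * gso_type, assuming the UDP tunnel bits are OR'd on top of the base
     * VIRTIO_NET_HDR_GSO_* value. */
    static inline __u8 vnet_hdr_inner_gso_type(__u8 gso_type)
    {
        return gso_type & ~(VIRTIO_NET_HDR_GSO_UDP_TUNNEL |
                            VIRTIO_NET_HDR_GSO_ECN);
    }

    static inline int vnet_hdr_is_udp_tunnel(__u8 gso_type)
    {
        return !!(gso_type & VIRTIO_NET_HDR_GSO_UDP_TUNNEL);
    }
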
#ifndef VIRTIO_NET_NO_LEGACY
/* This header comes first in the scatter-gather list.
* For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated, it must
diff --git a/include/uapi/rdma/efa-abi.h b/include/uapi/rdma/efa-abi.h
index 11b94b0b035b..98b71b9979f8 100644
--- a/include/uapi/rdma/efa-abi.h
+++ b/include/uapi/rdma/efa-abi.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
/*
- * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef EFA_ABI_USER_H
@@ -131,6 +131,7 @@ enum {
EFA_QUERY_DEVICE_CAPS_DATA_POLLING_128 = 1 << 4,
EFA_QUERY_DEVICE_CAPS_RDMA_WRITE = 1 << 5,
EFA_QUERY_DEVICE_CAPS_UNSOLICITED_WRITE_RECV = 1 << 6,
+ EFA_QUERY_DEVICE_CAPS_CQ_WITH_EXT_MEM = 1 << 7,
};
struct efa_ibv_ex_query_device_resp {
diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h
index ac7b162611ed..de6f5a94f1e3 100644
--- a/include/uapi/rdma/ib_user_ioctl_cmds.h
+++ b/include/uapi/rdma/ib_user_ioctl_cmds.h
@@ -55,6 +55,7 @@ enum uverbs_default_objects {
UVERBS_OBJECT_DM,
UVERBS_OBJECT_COUNTERS,
UVERBS_OBJECT_ASYNC_EVENT,
+ UVERBS_OBJECT_DMAH,
};
enum {
@@ -105,6 +106,10 @@ enum uverbs_attrs_create_cq_cmd_attr_ids {
UVERBS_ATTR_CREATE_CQ_FLAGS,
UVERBS_ATTR_CREATE_CQ_RESP_CQE,
UVERBS_ATTR_CREATE_CQ_EVENT_FD,
+ UVERBS_ATTR_CREATE_CQ_BUFFER_VA,
+ UVERBS_ATTR_CREATE_CQ_BUFFER_LENGTH,
+ UVERBS_ATTR_CREATE_CQ_BUFFER_FD,
+ UVERBS_ATTR_CREATE_CQ_BUFFER_OFFSET,
};
enum uverbs_attrs_destroy_cq_cmd_attr_ids {
@@ -236,6 +241,22 @@ enum uverbs_methods_dm {
UVERBS_METHOD_DM_FREE,
};
+enum uverbs_attrs_alloc_dmah_cmd_attr_ids {
+ UVERBS_ATTR_ALLOC_DMAH_HANDLE,
+ UVERBS_ATTR_ALLOC_DMAH_CPU_ID,
+ UVERBS_ATTR_ALLOC_DMAH_TPH_MEM_TYPE,
+ UVERBS_ATTR_ALLOC_DMAH_PH,
+};
+
+enum uverbs_attrs_free_dmah_cmd_attr_ids {
+ UVERBS_ATTR_FREE_DMA_HANDLE,
+};
+
+enum uverbs_methods_dmah {
+ UVERBS_METHOD_DMAH_ALLOC,
+ UVERBS_METHOD_DMAH_FREE,
+};
+
enum uverbs_attrs_reg_dm_mr_cmd_attr_ids {
UVERBS_ATTR_REG_DM_MR_HANDLE,
UVERBS_ATTR_REG_DM_MR_OFFSET,
@@ -253,6 +274,7 @@ enum uverbs_methods_mr {
UVERBS_METHOD_ADVISE_MR,
UVERBS_METHOD_QUERY_MR,
UVERBS_METHOD_REG_DMABUF_MR,
+ UVERBS_METHOD_REG_MR,
};
enum uverbs_attrs_mr_destroy_ids {
@@ -286,6 +308,20 @@ enum uverbs_attrs_reg_dmabuf_mr_cmd_attr_ids {
UVERBS_ATTR_REG_DMABUF_MR_RESP_RKEY,
};
+enum uverbs_attrs_reg_mr_cmd_attr_ids {
+ UVERBS_ATTR_REG_MR_HANDLE,
+ UVERBS_ATTR_REG_MR_PD_HANDLE,
+ UVERBS_ATTR_REG_MR_DMA_HANDLE,
+ UVERBS_ATTR_REG_MR_IOVA,
+ UVERBS_ATTR_REG_MR_ADDR,
+ UVERBS_ATTR_REG_MR_LENGTH,
+ UVERBS_ATTR_REG_MR_ACCESS_FLAGS,
+ UVERBS_ATTR_REG_MR_FD,
+ UVERBS_ATTR_REG_MR_FD_OFFSET,
+ UVERBS_ATTR_REG_MR_RESP_LKEY,
+ UVERBS_ATTR_REG_MR_RESP_RKEY,
+};
+
enum uverbs_attrs_create_counters_cmd_attr_ids {
UVERBS_ATTR_CREATE_COUNTERS_HANDLE,
};
diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h
index c0c59a8f7256..72fd385037a6 100644
--- a/include/ufs/ufs.h
+++ b/include/ufs/ufs.h
@@ -182,6 +182,11 @@ enum attr_idn {
QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE = 0x1F,
QUERY_ATTR_IDN_TIMESTAMP = 0x30,
QUERY_ATTR_IDN_DEV_LVL_EXCEPTION_ID = 0x34,
+ QUERY_ATTR_IDN_HID_DEFRAG_OPERATION = 0x35,
+ QUERY_ATTR_IDN_HID_AVAILABLE_SIZE = 0x36,
+ QUERY_ATTR_IDN_HID_SIZE = 0x37,
+ QUERY_ATTR_IDN_HID_PROGRESS_RATIO = 0x38,
+ QUERY_ATTR_IDN_HID_STATE = 0x39,
QUERY_ATTR_IDN_WB_BUF_RESIZE_HINT = 0x3C,
QUERY_ATTR_IDN_WB_BUF_RESIZE_EN = 0x3D,
QUERY_ATTR_IDN_WB_BUF_RESIZE_STATUS = 0x3E,
@@ -401,6 +406,7 @@ enum {
UFS_DEV_HPB_SUPPORT = BIT(7),
UFS_DEV_WRITE_BOOSTER_SUP = BIT(8),
UFS_DEV_LVL_EXCEPTION_SUP = BIT(12),
+ UFS_DEV_HID_SUPPORT = BIT(13),
};
#define UFS_DEV_HPB_SUPPORT_VERSION 0x310
@@ -466,6 +472,24 @@ enum ufs_ref_clk_freq {
REF_CLK_FREQ_INVAL = -1,
};
+/* bDefragOperation attribute values */
+enum ufs_hid_defrag_operation {
+ HID_ANALYSIS_AND_DEFRAG_DISABLE = 0,
+ HID_ANALYSIS_ENABLE = 1,
+ HID_ANALYSIS_AND_DEFRAG_ENABLE = 2,
+};
+
+/* bHIDState attribute values */
+enum ufs_hid_state {
+ HID_IDLE = 0,
+ ANALYSIS_IN_PROGRESS = 1,
+ DEFRAG_REQUIRED = 2,
+ DEFRAG_IN_PROGRESS = 3,
+ DEFRAG_COMPLETED = 4,
+ DEFRAG_NOT_REQUIRED = 5,
+ NUM_UFS_HID_STATES = 6,
+};
+
/* bWriteBoosterBufferResizeEn attribute */
enum wb_resize_en {
WB_RESIZE_EN_IDLE = 0,
@@ -625,6 +649,8 @@ struct ufs_dev_info {
u32 rtc_update_period;
u8 rtt_cap; /* bDeviceRTTCap */
+
+ bool hid_sup;
};
/*
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 9b3515cee711..1d3943777584 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -1480,6 +1480,7 @@ void ufshcd_resume_complete(struct device *dev);
bool ufshcd_is_hba_active(struct ufs_hba *hba);
void ufshcd_pm_qos_init(struct ufs_hba *hba);
void ufshcd_pm_qos_exit(struct ufs_hba *hba);
+int ufshcd_dme_rmw(struct ufs_hba *hba, u32 mask, u32 val, u32 attr);
/* Wrapper functions for safely calling variant operations */
static inline int ufshcd_vops_init(struct ufs_hba *hba)
diff --git a/include/video/edid.h b/include/video/edid.h
index f614371e9116..c2b186b1933a 100644
--- a/include/video/edid.h
+++ b/include/video/edid.h
@@ -4,7 +4,8 @@
#include <uapi/video/edid.h>
-#ifdef CONFIG_X86
+#if defined(CONFIG_FIRMWARE_EDID)
extern struct edid_info edid_info;
#endif
+
#endif /* __linux_video_edid_h__ */
diff --git a/include/video/sisfb.h b/include/video/sisfb.h
index 76ff628a1220..54e6632cd4a2 100644
--- a/include/video/sisfb.h
+++ b/include/video/sisfb.h
@@ -15,10 +15,4 @@
#define SIS_300_VGA 1
#define SIS_315_VGA 2
-#define SISFB_HAVE_MALLOC_NEW
-extern void sis_malloc(struct sis_memreq *req);
-extern void sis_malloc_new(struct pci_dev *pdev, struct sis_memreq *req);
-
-extern void sis_free(u32 base);
-extern void sis_free_new(struct pci_dev *pdev, u32 base);
#endif
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 47f11bec5e90..9e2a769b0d96 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -30,13 +30,11 @@ void xen_arch_suspend(void);
void xen_reboot(int reason);
void xen_resume_notifier_register(struct notifier_block *nb);
-void xen_resume_notifier_unregister(struct notifier_block *nb);
bool xen_vcpu_stolen(int vcpu);
void xen_setup_runstate_info(int cpu);
void xen_time_setup_guest(void);
void xen_manage_runstate_time(int action);
-void xen_get_runstate_snapshot(struct vcpu_runstate_info *res);
u64 xen_steal_clock(int cpu);
int xen_setup_shutdown_event(void);
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 3f90bdd387b6..1c23e6387f13 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -154,8 +154,6 @@ void *xenbus_read(struct xenbus_transaction t,
const char *dir, const char *node, unsigned int *len);
int xenbus_write(struct xenbus_transaction t,
const char *dir, const char *node, const char *string);
-int xenbus_mkdir(struct xenbus_transaction t,
- const char *dir, const char *node);
int xenbus_exists(struct xenbus_transaction t,
const char *dir, const char *node);
int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node);